[ "= \"CREATE TABLE IF NOT EXISTS cards('card_title' VARCHAR,\" + \\", "+ \\ f\"cards VALUES ({card_title}, {card_text}, {card_link_text}, {card_link_url})\" try: cur.execute(create_table_query)", "{card_link_url})\" try: cur.execute(create_table_query) cur.execute(insert_data_query) con.commit() except: print(\"an error has been", "NOT EXISTS cards('card_title' VARCHAR,\" + \\ \" 'card_text' TEXT, 'card_link_text'", "card_link_text, card_link_url): con = self.get_connection() cur = con.cursor() create_table_query =", "= self.get_connection() cur = con.cursor() create_table_query = \"CREATE TABLE IF", "create_table_query = \"CREATE TABLE IF NOT EXISTS cards('card_title' VARCHAR,\" +", "\" + \\ f\"cards VALUES ({card_title}, {card_text}, {card_link_text}, {card_link_url})\" try:", "con = self.get_connection() cur = con.cursor() create_table_query = \"CREATE TABLE", "TEXT, 'card_link_text' VARCHAR, 'card_link_url' VARCHAR )\" insert_data_query = f\"INSERT INTO", ")\" insert_data_query = f\"INSERT INTO \" + \\ f\"cards VALUES", "'card_text' TEXT, 'card_link_text' VARCHAR, 'card_link_url' VARCHAR )\" insert_data_query = f\"INSERT", "def add_card(self, card_title, card_text, card_link_text, card_link_url): con = self.get_connection() cur", "card_text, card_link_text, card_link_url): con = self.get_connection() cur = con.cursor() create_table_query", "try: cur.execute(create_table_query) cur.execute(insert_data_query) con.commit() except: print(\"an error has been occurred", "get_connection(self): return sqlite3.connect(\"./db.sqlite\") def add_card(self, card_title, card_text, card_link_text, card_link_url): con", "def get_connection(self): return sqlite3.connect(\"./db.sqlite\") def add_card(self, card_title, card_text, card_link_text, card_link_url):", "IF NOT EXISTS cards('card_title' VARCHAR,\" + \\ \" 'card_text' TEXT,", "\" 'card_text' TEXT, 'card_link_text' VARCHAR, 'card_link_url' VARCHAR )\" insert_data_query =", "cur.execute(create_table_query) cur.execute(insert_data_query) con.commit() except: print(\"an error has been occurred !\")", "VARCHAR )\" insert_data_query = f\"INSERT INTO \" + \\ f\"cards", "EXISTS cards('card_title' VARCHAR,\" + \\ \" 'card_text' TEXT, 'card_link_text' VARCHAR,", "insert_data_query = f\"INSERT INTO \" + \\ f\"cards VALUES ({card_title},", "card_link_url): con = self.get_connection() cur = con.cursor() create_table_query = \"CREATE", "'card_link_url' VARCHAR )\" insert_data_query = f\"INSERT INTO \" + \\", "\\ \" 'card_text' TEXT, 'card_link_text' VARCHAR, 'card_link_url' VARCHAR )\" insert_data_query", "{card_link_text}, {card_link_url})\" try: cur.execute(create_table_query) cur.execute(insert_data_query) con.commit() except: print(\"an error has", "VARCHAR,\" + \\ \" 'card_text' TEXT, 'card_link_text' VARCHAR, 'card_link_url' VARCHAR", "= f\"INSERT INTO \" + \\ f\"cards VALUES ({card_title}, {card_text},", "cards('card_title' VARCHAR,\" + \\ \" 'card_text' TEXT, 'card_link_text' VARCHAR, 'card_link_url'", "sqlite3.connect(\"./db.sqlite\") def add_card(self, card_title, card_text, card_link_text, card_link_url): con = self.get_connection()", "+ \\ \" 'card_text' TEXT, 'card_link_text' VARCHAR, 'card_link_url' VARCHAR )\"", "\\ f\"cards VALUES ({card_title}, {card_text}, {card_link_text}, {card_link_url})\" try: cur.execute(create_table_query) cur.execute(insert_data_query)", "VARCHAR, 'card_link_url' VARCHAR )\" insert_data_query = f\"INSERT INTO \" +", "{card_text}, {card_link_text}, {card_link_url})\" try: cur.execute(create_table_query) 
cur.execute(insert_data_query) con.commit() except: print(\"an error", "f\"cards VALUES ({card_title}, {card_text}, {card_link_text}, {card_link_url})\" try: cur.execute(create_table_query) cur.execute(insert_data_query) con.commit()", "return sqlite3.connect(\"./db.sqlite\") def add_card(self, card_title, card_text, card_link_text, card_link_url): con =", "= con.cursor() create_table_query = \"CREATE TABLE IF NOT EXISTS cards('card_title'", "cur = con.cursor() create_table_query = \"CREATE TABLE IF NOT EXISTS", "add_card(self, card_title, card_text, card_link_text, card_link_url): con = self.get_connection() cur =", "f\"INSERT INTO \" + \\ f\"cards VALUES ({card_title}, {card_text}, {card_link_text},", "'card_link_text' VARCHAR, 'card_link_url' VARCHAR )\" insert_data_query = f\"INSERT INTO \"", "TABLE IF NOT EXISTS cards('card_title' VARCHAR,\" + \\ \" 'card_text'", "class Database: def get_connection(self): return sqlite3.connect(\"./db.sqlite\") def add_card(self, card_title, card_text,", "Database: def get_connection(self): return sqlite3.connect(\"./db.sqlite\") def add_card(self, card_title, card_text, card_link_text,", "VALUES ({card_title}, {card_text}, {card_link_text}, {card_link_url})\" try: cur.execute(create_table_query) cur.execute(insert_data_query) con.commit() except:", "\"CREATE TABLE IF NOT EXISTS cards('card_title' VARCHAR,\" + \\ \"", "({card_title}, {card_text}, {card_link_text}, {card_link_url})\" try: cur.execute(create_table_query) cur.execute(insert_data_query) con.commit() except: print(\"an", "INTO \" + \\ f\"cards VALUES ({card_title}, {card_text}, {card_link_text}, {card_link_url})\"", "sqlite3 class Database: def get_connection(self): return sqlite3.connect(\"./db.sqlite\") def add_card(self, card_title,", "con.cursor() create_table_query = \"CREATE TABLE IF NOT EXISTS cards('card_title' VARCHAR,\"", "self.get_connection() cur = con.cursor() create_table_query = \"CREATE TABLE IF NOT", "import sqlite3 class Database: def get_connection(self): return sqlite3.connect(\"./db.sqlite\") def add_card(self,", "card_title, card_text, card_link_text, card_link_url): con = self.get_connection() cur = con.cursor()" ]
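A minimal usage sketch (the card values below are hypothetical, not from the original): a single call creates the table if needed and inserts one row into ./db.sqlite.

db = Database()
db.add_card("Welcome", "This is an example card body.",
            "Read more", "https://example.com/welcome")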
[ "'low'), ('medium', 'medium'), ('high', 'high')] self.fields['source'].choices = [ (c.name, c.name)", "super(AddBackdoorForm, self).clean() campaign = cleaned_data.get('campaign') if campaign: confidence = cleaned_data.get('confidence')", "crits.core import form_consts from crits.vocabulary.relationships import RelationshipTypes relationship_choices = [(c,", "aliases = forms.CharField(label=form_consts.Backdoor.ALIASES, required=False) version = forms.CharField(label=form_consts.Backdoor.VERSION, required=False) description =", "\"\"\" error_css_class = 'error' required_css_class = 'required' name = forms.CharField(label=form_consts.Backdoor.NAME,", "forms.CharField(label=form_consts.Backdoor.DESCRIPTION, required=False) campaign = forms.ChoiceField(widget=forms.Select, label=form_consts.Backdoor.CAMPAIGN, required=False) confidence = forms.ChoiceField(label=form_consts.Backdoor.CAMPAIGN_CONFIDENCE,", "label=form_consts.Common.RELATIONSHIP_TYPE, widget=forms.Select(attrs={'id':'relationship_type'})) def __init__(self, username, *args, **kwargs): super(AddBackdoorForm, self).__init__(*args, **kwargs)", "*args, **kwargs): super(AddBackdoorForm, self).__init__(*args, **kwargs) self.fields['campaign'].choices = [('', '')] +", "username)] self.fields['source'].initial = get_user_organization(username) self.fields['relationship_type'].choices = relationship_choices self.fields['relationship_type'].initial = RelationshipTypes.RELATED_TO", "import Campaign from crits.core.forms import add_bucketlist_to_form, add_ticket_to_form from crits.core.handlers import", "get_source_names from crits.core.user_tools import get_user_organization from crits.core import form_consts from", "import get_user_organization from crits.core import form_consts from crits.vocabulary.relationships import RelationshipTypes", "= forms.CharField(label=form_consts.Backdoor.SOURCE_METHOD, required=False) source_reference = forms.CharField(widget=forms.TextInput(attrs={'size': '90'}), label=form_consts.Backdoor.SOURCE_REFERENCE, required=False) related_id", "import get_item_names, get_source_names from crits.core.user_tools import get_user_organization from crits.core import", "adding a Backdoor to CRITs. 
\"\"\" error_css_class = 'error' required_css_class", "forms.CharField(label=form_consts.Backdoor.ALIASES, required=False) version = forms.CharField(label=form_consts.Backdoor.VERSION, required=False) description = forms.CharField(label=form_consts.Backdoor.DESCRIPTION, required=False)", "forms.ChoiceField(required=False, label=form_consts.Common.RELATIONSHIP_TYPE, widget=forms.Select(attrs={'id':'relationship_type'})) def __init__(self, username, *args, **kwargs): super(AddBackdoorForm, self).__init__(*args,", "get_user_organization(username) self.fields['relationship_type'].choices = relationship_choices self.fields['relationship_type'].initial = RelationshipTypes.RELATED_TO add_bucketlist_to_form(self) add_ticket_to_form(self) def", "crits.core.user_tools import get_user_organization from crits.core import form_consts from crits.vocabulary.relationships import", "confidence = cleaned_data.get('confidence') if not confidence or confidence == '':", "(c.name, c.name) for c in get_item_names(Campaign, True)] self.fields['confidence'].choices = [", "required=False) source_reference = forms.CharField(widget=forms.TextInput(attrs={'size': '90'}), label=form_consts.Backdoor.SOURCE_REFERENCE, required=False) related_id = forms.CharField(widget=forms.HiddenInput(),", "True, username)] self.fields['source'].initial = get_user_organization(username) self.fields['relationship_type'].choices = relationship_choices self.fields['relationship_type'].initial =", "if not confidence or confidence == '': self._errors.setdefault('confidence', ErrorList()) self._errors['confidence'].append(u'This", "required=False) source = forms.ChoiceField(widget=forms.Select(attrs={'class': 'bulknoinitial'}), label=form_consts.Backdoor.SOURCE, required=True) source_method = forms.CharField(label=form_consts.Backdoor.SOURCE_METHOD,", "username, *args, **kwargs): super(AddBackdoorForm, self).__init__(*args, **kwargs) self.fields['campaign'].choices = [('', '')]", "= forms.CharField(widget=forms.HiddenInput(), required=False, label=form_consts.Common.RELATED_ID) related_type = forms.CharField(widget=forms.HiddenInput(), required=False, label=form_consts.Common.RELATED_TYPE) relationship_type", "import form_consts from crits.vocabulary.relationships import RelationshipTypes relationship_choices = [(c, c)", "= forms.ChoiceField(label=form_consts.Backdoor.CAMPAIGN_CONFIDENCE, required=False) source = forms.ChoiceField(widget=forms.Select(attrs={'class': 'bulknoinitial'}), label=form_consts.Backdoor.SOURCE, required=True) source_method", "[('', '')] + [ (c.name, c.name) for c in get_item_names(Campaign,", "import ErrorList from crits.campaigns.campaign import Campaign from crits.core.forms import add_bucketlist_to_form,", "label=form_consts.Backdoor.CAMPAIGN, required=False) confidence = forms.ChoiceField(label=form_consts.Backdoor.CAMPAIGN_CONFIDENCE, required=False) source = forms.ChoiceField(widget=forms.Select(attrs={'class': 'bulknoinitial'}),", "get_item_names, get_source_names from crits.core.user_tools import get_user_organization from crits.core import form_consts", "for c in RelationshipTypes.values(sort=True)] class AddBackdoorForm(forms.Form): \"\"\" Django form for", "campaign = cleaned_data.get('campaign') if campaign: confidence = cleaned_data.get('confidence') if not", "forms.CharField(label=form_consts.Backdoor.SOURCE_METHOD, required=False) source_reference = forms.CharField(widget=forms.TextInput(attrs={'size': '90'}), label=form_consts.Backdoor.SOURCE_REFERENCE, required=False) related_id =", "= 
cleaned_data.get('campaign') if campaign: confidence = cleaned_data.get('confidence') if not confidence", "required=False) campaign = forms.ChoiceField(widget=forms.Select, label=form_consts.Backdoor.CAMPAIGN, required=False) confidence = forms.ChoiceField(label=form_consts.Backdoor.CAMPAIGN_CONFIDENCE, required=False)", "[ ('', ''), ('low', 'low'), ('medium', 'medium'), ('high', 'high')] self.fields['source'].choices", "from crits.core.user_tools import get_user_organization from crits.core import form_consts from crits.vocabulary.relationships", "= [('', '')] + [ (c.name, c.name) for c in", "'medium'), ('high', 'high')] self.fields['source'].choices = [ (c.name, c.name) for c", "c.name) for c in get_source_names(True, True, username)] self.fields['source'].initial = get_user_organization(username)", "c.name) for c in get_item_names(Campaign, True)] self.fields['confidence'].choices = [ ('',", "**kwargs) self.fields['campaign'].choices = [('', '')] + [ (c.name, c.name) for", "'bulknoinitial'}), label=form_consts.Backdoor.SOURCE, required=True) source_method = forms.CharField(label=form_consts.Backdoor.SOURCE_METHOD, required=False) source_reference = forms.CharField(widget=forms.TextInput(attrs={'size':", "'high')] self.fields['source'].choices = [ (c.name, c.name) for c in get_source_names(True,", "self).__init__(*args, **kwargs) self.fields['campaign'].choices = [('', '')] + [ (c.name, c.name)", "version = forms.CharField(label=form_consts.Backdoor.VERSION, required=False) description = forms.CharField(label=form_consts.Backdoor.DESCRIPTION, required=False) campaign =", "= forms.ChoiceField(widget=forms.Select(attrs={'class': 'bulknoinitial'}), label=form_consts.Backdoor.SOURCE, required=True) source_method = forms.CharField(label=form_consts.Backdoor.SOURCE_METHOD, required=False) source_reference", "label=form_consts.Common.RELATED_TYPE) relationship_type = forms.ChoiceField(required=False, label=form_consts.Common.RELATIONSHIP_TYPE, widget=forms.Select(attrs={'id':'relationship_type'})) def __init__(self, username, *args,", "super(AddBackdoorForm, self).__init__(*args, **kwargs) self.fields['campaign'].choices = [('', '')] + [ (c.name,", "form for adding a Backdoor to CRITs. 
\"\"\" error_css_class =", "('low', 'low'), ('medium', 'medium'), ('high', 'high')] self.fields['source'].choices = [ (c.name,", "import forms from django.forms.utils import ErrorList from crits.campaigns.campaign import Campaign", "error_css_class = 'error' required_css_class = 'required' name = forms.CharField(label=form_consts.Backdoor.NAME, required=True)", "source_method = forms.CharField(label=form_consts.Backdoor.SOURCE_METHOD, required=False) source_reference = forms.CharField(widget=forms.TextInput(attrs={'size': '90'}), label=form_consts.Backdoor.SOURCE_REFERENCE, required=False)", "= forms.CharField(label=form_consts.Backdoor.DESCRIPTION, required=False) campaign = forms.ChoiceField(widget=forms.Select, label=form_consts.Backdoor.CAMPAIGN, required=False) confidence =", "for c in get_item_names(Campaign, True)] self.fields['confidence'].choices = [ ('', ''),", "RelationshipTypes.values(sort=True)] class AddBackdoorForm(forms.Form): \"\"\" Django form for adding a Backdoor", "if campaign: confidence = cleaned_data.get('confidence') if not confidence or confidence", "description = forms.CharField(label=form_consts.Backdoor.DESCRIPTION, required=False) campaign = forms.ChoiceField(widget=forms.Select, label=form_consts.Backdoor.CAMPAIGN, required=False) confidence", "== '': self._errors.setdefault('confidence', ErrorList()) self._errors['confidence'].append(u'This field is required if campaign", "self.fields['source'].initial = get_user_organization(username) self.fields['relationship_type'].choices = relationship_choices self.fields['relationship_type'].initial = RelationshipTypes.RELATED_TO add_bucketlist_to_form(self)", "True)] self.fields['confidence'].choices = [ ('', ''), ('low', 'low'), ('medium', 'medium'),", "Django form for adding a Backdoor to CRITs. 
\"\"\" error_css_class", "add_ticket_to_form(self) def clean(self): cleaned_data = super(AddBackdoorForm, self).clean() campaign = cleaned_data.get('campaign')", "[ (c.name, c.name) for c in get_source_names(True, True, username)] self.fields['source'].initial", "required_css_class = 'required' name = forms.CharField(label=form_consts.Backdoor.NAME, required=True) aliases = forms.CharField(label=form_consts.Backdoor.ALIASES,", "'90'}), label=form_consts.Backdoor.SOURCE_REFERENCE, required=False) related_id = forms.CharField(widget=forms.HiddenInput(), required=False, label=form_consts.Common.RELATED_ID) related_type =", "from crits.core import form_consts from crits.vocabulary.relationships import RelationshipTypes relationship_choices =", "(c.name, c.name) for c in get_source_names(True, True, username)] self.fields['source'].initial =", "required=False, label=form_consts.Common.RELATED_ID) related_type = forms.CharField(widget=forms.HiddenInput(), required=False, label=form_consts.Common.RELATED_TYPE) relationship_type = forms.ChoiceField(required=False,", "def __init__(self, username, *args, **kwargs): super(AddBackdoorForm, self).__init__(*args, **kwargs) self.fields['campaign'].choices =", "in get_source_names(True, True, username)] self.fields['source'].initial = get_user_organization(username) self.fields['relationship_type'].choices = relationship_choices", "= RelationshipTypes.RELATED_TO add_bucketlist_to_form(self) add_ticket_to_form(self) def clean(self): cleaned_data = super(AddBackdoorForm, self).clean()", "= forms.CharField(label=form_consts.Backdoor.NAME, required=True) aliases = forms.CharField(label=form_consts.Backdoor.ALIASES, required=False) version = forms.CharField(label=form_consts.Backdoor.VERSION,", "from crits.campaigns.campaign import Campaign from crits.core.forms import add_bucketlist_to_form, add_ticket_to_form from", "get_user_organization from crits.core import form_consts from crits.vocabulary.relationships import RelationshipTypes relationship_choices", "for adding a Backdoor to CRITs. \"\"\" error_css_class = 'error'", "c in get_source_names(True, True, username)] self.fields['source'].initial = get_user_organization(username) self.fields['relationship_type'].choices =", "def clean(self): cleaned_data = super(AddBackdoorForm, self).clean() campaign = cleaned_data.get('campaign') if", "''), ('low', 'low'), ('medium', 'medium'), ('high', 'high')] self.fields['source'].choices = [", "label=form_consts.Common.RELATED_ID) related_type = forms.CharField(widget=forms.HiddenInput(), required=False, label=form_consts.Common.RELATED_TYPE) relationship_type = forms.ChoiceField(required=False, label=form_consts.Common.RELATIONSHIP_TYPE,", "forms.CharField(label=form_consts.Backdoor.VERSION, required=False) description = forms.CharField(label=form_consts.Backdoor.DESCRIPTION, required=False) campaign = forms.ChoiceField(widget=forms.Select, label=form_consts.Backdoor.CAMPAIGN,", "RelationshipTypes.RELATED_TO add_bucketlist_to_form(self) add_ticket_to_form(self) def clean(self): cleaned_data = super(AddBackdoorForm, self).clean() campaign", "in get_item_names(Campaign, True)] self.fields['confidence'].choices = [ ('', ''), ('low', 'low'),", "\"\"\" Django form for adding a Backdoor to CRITs. 
\"\"\"", "('medium', 'medium'), ('high', 'high')] self.fields['source'].choices = [ (c.name, c.name) for", "self._errors['confidence'].append(u'This field is required if campaign is specified.') return cleaned_data", "self.fields['relationship_type'].initial = RelationshipTypes.RELATED_TO add_bucketlist_to_form(self) add_ticket_to_form(self) def clean(self): cleaned_data = super(AddBackdoorForm,", "self.fields['confidence'].choices = [ ('', ''), ('low', 'low'), ('medium', 'medium'), ('high',", "forms from django.forms.utils import ErrorList from crits.campaigns.campaign import Campaign from", "forms.ChoiceField(widget=forms.Select, label=form_consts.Backdoor.CAMPAIGN, required=False) confidence = forms.ChoiceField(label=form_consts.Backdoor.CAMPAIGN_CONFIDENCE, required=False) source = forms.ChoiceField(widget=forms.Select(attrs={'class':", "name = forms.CharField(label=form_consts.Backdoor.NAME, required=True) aliases = forms.CharField(label=form_consts.Backdoor.ALIASES, required=False) version =", "= relationship_choices self.fields['relationship_type'].initial = RelationshipTypes.RELATED_TO add_bucketlist_to_form(self) add_ticket_to_form(self) def clean(self): cleaned_data", "django.forms.utils import ErrorList from crits.campaigns.campaign import Campaign from crits.core.forms import", "or confidence == '': self._errors.setdefault('confidence', ErrorList()) self._errors['confidence'].append(u'This field is required", "RelationshipTypes relationship_choices = [(c, c) for c in RelationshipTypes.values(sort=True)] class", "CRITs. \"\"\" error_css_class = 'error' required_css_class = 'required' name =", "AddBackdoorForm(forms.Form): \"\"\" Django form for adding a Backdoor to CRITs.", "__init__(self, username, *args, **kwargs): super(AddBackdoorForm, self).__init__(*args, **kwargs) self.fields['campaign'].choices = [('',", "ErrorList from crits.campaigns.campaign import Campaign from crits.core.forms import add_bucketlist_to_form, add_ticket_to_form", "campaign: confidence = cleaned_data.get('confidence') if not confidence or confidence ==", "forms.CharField(label=form_consts.Backdoor.NAME, required=True) aliases = forms.CharField(label=form_consts.Backdoor.ALIASES, required=False) version = forms.CharField(label=form_consts.Backdoor.VERSION, required=False)", "from crits.core.handlers import get_item_names, get_source_names from crits.core.user_tools import get_user_organization from", "not confidence or confidence == '': self._errors.setdefault('confidence', ErrorList()) self._errors['confidence'].append(u'This field", "= forms.CharField(label=form_consts.Backdoor.VERSION, required=False) description = forms.CharField(label=form_consts.Backdoor.DESCRIPTION, required=False) campaign = forms.ChoiceField(widget=forms.Select,", "Campaign from crits.core.forms import add_bucketlist_to_form, add_ticket_to_form from crits.core.handlers import get_item_names,", "= forms.ChoiceField(widget=forms.Select, label=form_consts.Backdoor.CAMPAIGN, required=False) confidence = forms.ChoiceField(label=form_consts.Backdoor.CAMPAIGN_CONFIDENCE, required=False) source =", "import add_bucketlist_to_form, add_ticket_to_form from crits.core.handlers import get_item_names, get_source_names from crits.core.user_tools", "campaign = forms.ChoiceField(widget=forms.Select, label=form_consts.Backdoor.CAMPAIGN, required=False) confidence = forms.ChoiceField(label=form_consts.Backdoor.CAMPAIGN_CONFIDENCE, required=False) source", "= get_user_organization(username) self.fields['relationship_type'].choices = 
relationship_choices self.fields['relationship_type'].initial = RelationshipTypes.RELATED_TO add_bucketlist_to_form(self) add_ticket_to_form(self)", "clean(self): cleaned_data = super(AddBackdoorForm, self).clean() campaign = cleaned_data.get('campaign') if campaign:", "add_ticket_to_form from crits.core.handlers import get_item_names, get_source_names from crits.core.user_tools import get_user_organization", "source_reference = forms.CharField(widget=forms.TextInput(attrs={'size': '90'}), label=form_consts.Backdoor.SOURCE_REFERENCE, required=False) related_id = forms.CharField(widget=forms.HiddenInput(), required=False,", "to CRITs. \"\"\" error_css_class = 'error' required_css_class = 'required' name", "required=False) related_id = forms.CharField(widget=forms.HiddenInput(), required=False, label=form_consts.Common.RELATED_ID) related_type = forms.CharField(widget=forms.HiddenInput(), required=False,", "= 'error' required_css_class = 'required' name = forms.CharField(label=form_consts.Backdoor.NAME, required=True) aliases", "required=True) source_method = forms.CharField(label=form_consts.Backdoor.SOURCE_METHOD, required=False) source_reference = forms.CharField(widget=forms.TextInput(attrs={'size': '90'}), label=form_consts.Backdoor.SOURCE_REFERENCE,", "= forms.CharField(widget=forms.HiddenInput(), required=False, label=form_consts.Common.RELATED_TYPE) relationship_type = forms.ChoiceField(required=False, label=form_consts.Common.RELATIONSHIP_TYPE, widget=forms.Select(attrs={'id':'relationship_type'})) def", "confidence == '': self._errors.setdefault('confidence', ErrorList()) self._errors['confidence'].append(u'This field is required if", "= [ (c.name, c.name) for c in get_source_names(True, True, username)]", "import RelationshipTypes relationship_choices = [(c, c) for c in RelationshipTypes.values(sort=True)]", "required=False) version = forms.CharField(label=form_consts.Backdoor.VERSION, required=False) description = forms.CharField(label=form_consts.Backdoor.DESCRIPTION, required=False) campaign", "required=True) aliases = forms.CharField(label=form_consts.Backdoor.ALIASES, required=False) version = forms.CharField(label=form_consts.Backdoor.VERSION, required=False) description", "label=form_consts.Backdoor.SOURCE, required=True) source_method = forms.CharField(label=form_consts.Backdoor.SOURCE_METHOD, required=False) source_reference = forms.CharField(widget=forms.TextInput(attrs={'size': '90'}),", "crits.vocabulary.relationships import RelationshipTypes relationship_choices = [(c, c) for c in", "a Backdoor to CRITs. 
\"\"\" error_css_class = 'error' required_css_class =", "= forms.CharField(label=form_consts.Backdoor.ALIASES, required=False) version = forms.CharField(label=form_consts.Backdoor.VERSION, required=False) description = forms.CharField(label=form_consts.Backdoor.DESCRIPTION,", "forms.CharField(widget=forms.TextInput(attrs={'size': '90'}), label=form_consts.Backdoor.SOURCE_REFERENCE, required=False) related_id = forms.CharField(widget=forms.HiddenInput(), required=False, label=form_consts.Common.RELATED_ID) related_type", "+ [ (c.name, c.name) for c in get_item_names(Campaign, True)] self.fields['confidence'].choices", "'error' required_css_class = 'required' name = forms.CharField(label=form_consts.Backdoor.NAME, required=True) aliases =", "related_id = forms.CharField(widget=forms.HiddenInput(), required=False, label=form_consts.Common.RELATED_ID) related_type = forms.CharField(widget=forms.HiddenInput(), required=False, label=form_consts.Common.RELATED_TYPE)", "crits.core.forms import add_bucketlist_to_form, add_ticket_to_form from crits.core.handlers import get_item_names, get_source_names from", "self._errors.setdefault('confidence', ErrorList()) self._errors['confidence'].append(u'This field is required if campaign is specified.')", "get_source_names(True, True, username)] self.fields['source'].initial = get_user_organization(username) self.fields['relationship_type'].choices = relationship_choices self.fields['relationship_type'].initial", "class AddBackdoorForm(forms.Form): \"\"\" Django form for adding a Backdoor to", "django import forms from django.forms.utils import ErrorList from crits.campaigns.campaign import", "relationship_choices = [(c, c) for c in RelationshipTypes.values(sort=True)] class AddBackdoorForm(forms.Form):", "[(c, c) for c in RelationshipTypes.values(sort=True)] class AddBackdoorForm(forms.Form): \"\"\" Django", "forms.ChoiceField(label=form_consts.Backdoor.CAMPAIGN_CONFIDENCE, required=False) source = forms.ChoiceField(widget=forms.Select(attrs={'class': 'bulknoinitial'}), label=form_consts.Backdoor.SOURCE, required=True) source_method =", "= 'required' name = forms.CharField(label=form_consts.Backdoor.NAME, required=True) aliases = forms.CharField(label=form_consts.Backdoor.ALIASES, required=False)", "required=False, label=form_consts.Common.RELATED_TYPE) relationship_type = forms.ChoiceField(required=False, label=form_consts.Common.RELATIONSHIP_TYPE, widget=forms.Select(attrs={'id':'relationship_type'})) def __init__(self, username,", "'required' name = forms.CharField(label=form_consts.Backdoor.NAME, required=True) aliases = forms.CharField(label=form_consts.Backdoor.ALIASES, required=False) version", "= [ ('', ''), ('low', 'low'), ('medium', 'medium'), ('high', 'high')]", "c in RelationshipTypes.values(sort=True)] class AddBackdoorForm(forms.Form): \"\"\" Django form for adding", "from crits.core.forms import add_bucketlist_to_form, add_ticket_to_form from crits.core.handlers import get_item_names, get_source_names", "from django import forms from django.forms.utils import ErrorList from crits.campaigns.campaign", "= cleaned_data.get('confidence') if not confidence or confidence == '': self._errors.setdefault('confidence',", "self.fields['source'].choices = [ (c.name, c.name) for c in get_source_names(True, True,", "label=form_consts.Backdoor.SOURCE_REFERENCE, required=False) related_id = forms.CharField(widget=forms.HiddenInput(), required=False, label=form_consts.Common.RELATED_ID) related_type = forms.CharField(widget=forms.HiddenInput(),", 
"self.fields['campaign'].choices = [('', '')] + [ (c.name, c.name) for c", "cleaned_data.get('campaign') if campaign: confidence = cleaned_data.get('confidence') if not confidence or", "= forms.ChoiceField(required=False, label=form_consts.Common.RELATIONSHIP_TYPE, widget=forms.Select(attrs={'id':'relationship_type'})) def __init__(self, username, *args, **kwargs): super(AddBackdoorForm,", "'': self._errors.setdefault('confidence', ErrorList()) self._errors['confidence'].append(u'This field is required if campaign is", "confidence = forms.ChoiceField(label=form_consts.Backdoor.CAMPAIGN_CONFIDENCE, required=False) source = forms.ChoiceField(widget=forms.Select(attrs={'class': 'bulknoinitial'}), label=form_consts.Backdoor.SOURCE, required=True)", "relationship_choices self.fields['relationship_type'].initial = RelationshipTypes.RELATED_TO add_bucketlist_to_form(self) add_ticket_to_form(self) def clean(self): cleaned_data =", "Backdoor to CRITs. \"\"\" error_css_class = 'error' required_css_class = 'required'", "('', ''), ('low', 'low'), ('medium', 'medium'), ('high', 'high')] self.fields['source'].choices =", "source = forms.ChoiceField(widget=forms.Select(attrs={'class': 'bulknoinitial'}), label=form_consts.Backdoor.SOURCE, required=True) source_method = forms.CharField(label=form_consts.Backdoor.SOURCE_METHOD, required=False)", "forms.ChoiceField(widget=forms.Select(attrs={'class': 'bulknoinitial'}), label=form_consts.Backdoor.SOURCE, required=True) source_method = forms.CharField(label=form_consts.Backdoor.SOURCE_METHOD, required=False) source_reference =", "in RelationshipTypes.values(sort=True)] class AddBackdoorForm(forms.Form): \"\"\" Django form for adding a", "for c in get_source_names(True, True, username)] self.fields['source'].initial = get_user_organization(username) self.fields['relationship_type'].choices", "= super(AddBackdoorForm, self).clean() campaign = cleaned_data.get('campaign') if campaign: confidence =", "self).clean() campaign = cleaned_data.get('campaign') if campaign: confidence = cleaned_data.get('confidence') if", "crits.core.handlers import get_item_names, get_source_names from crits.core.user_tools import get_user_organization from crits.core", "cleaned_data.get('confidence') if not confidence or confidence == '': self._errors.setdefault('confidence', ErrorList())", "required=False) confidence = forms.ChoiceField(label=form_consts.Backdoor.CAMPAIGN_CONFIDENCE, required=False) source = forms.ChoiceField(widget=forms.Select(attrs={'class': 'bulknoinitial'}), label=form_consts.Backdoor.SOURCE,", "from crits.vocabulary.relationships import RelationshipTypes relationship_choices = [(c, c) for c", "<gh_stars>10-100 from django import forms from django.forms.utils import ErrorList from", "self.fields['relationship_type'].choices = relationship_choices self.fields['relationship_type'].initial = RelationshipTypes.RELATED_TO add_bucketlist_to_form(self) add_ticket_to_form(self) def clean(self):", "form_consts from crits.vocabulary.relationships import RelationshipTypes relationship_choices = [(c, c) for", "widget=forms.Select(attrs={'id':'relationship_type'})) def __init__(self, username, *args, **kwargs): super(AddBackdoorForm, self).__init__(*args, **kwargs) self.fields['campaign'].choices", "cleaned_data = super(AddBackdoorForm, self).clean() campaign = cleaned_data.get('campaign') if campaign: confidence", "forms.CharField(widget=forms.HiddenInput(), required=False, label=form_consts.Common.RELATED_ID) related_type = forms.CharField(widget=forms.HiddenInput(), 
required=False, label=form_consts.Common.RELATED_TYPE) relationship_type =", "forms.CharField(widget=forms.HiddenInput(), required=False, label=form_consts.Common.RELATED_TYPE) relationship_type = forms.ChoiceField(required=False, label=form_consts.Common.RELATIONSHIP_TYPE, widget=forms.Select(attrs={'id':'relationship_type'})) def __init__(self,", "= [(c, c) for c in RelationshipTypes.values(sort=True)] class AddBackdoorForm(forms.Form): \"\"\"", "c in get_item_names(Campaign, True)] self.fields['confidence'].choices = [ ('', ''), ('low',", "add_bucketlist_to_form(self) add_ticket_to_form(self) def clean(self): cleaned_data = super(AddBackdoorForm, self).clean() campaign =", "add_bucketlist_to_form, add_ticket_to_form from crits.core.handlers import get_item_names, get_source_names from crits.core.user_tools import", "c) for c in RelationshipTypes.values(sort=True)] class AddBackdoorForm(forms.Form): \"\"\" Django form", "[ (c.name, c.name) for c in get_item_names(Campaign, True)] self.fields['confidence'].choices =", "ErrorList()) self._errors['confidence'].append(u'This field is required if campaign is specified.') return", "= forms.CharField(widget=forms.TextInput(attrs={'size': '90'}), label=form_consts.Backdoor.SOURCE_REFERENCE, required=False) related_id = forms.CharField(widget=forms.HiddenInput(), required=False, label=form_consts.Common.RELATED_ID)", "'')] + [ (c.name, c.name) for c in get_item_names(Campaign, True)]", "confidence or confidence == '': self._errors.setdefault('confidence', ErrorList()) self._errors['confidence'].append(u'This field is", "('high', 'high')] self.fields['source'].choices = [ (c.name, c.name) for c in", "relationship_type = forms.ChoiceField(required=False, label=form_consts.Common.RELATIONSHIP_TYPE, widget=forms.Select(attrs={'id':'relationship_type'})) def __init__(self, username, *args, **kwargs):", "get_item_names(Campaign, True)] self.fields['confidence'].choices = [ ('', ''), ('low', 'low'), ('medium',", "crits.campaigns.campaign import Campaign from crits.core.forms import add_bucketlist_to_form, add_ticket_to_form from crits.core.handlers", "from django.forms.utils import ErrorList from crits.campaigns.campaign import Campaign from crits.core.forms", "related_type = forms.CharField(widget=forms.HiddenInput(), required=False, label=form_consts.Common.RELATED_TYPE) relationship_type = forms.ChoiceField(required=False, label=form_consts.Common.RELATIONSHIP_TYPE, widget=forms.Select(attrs={'id':'relationship_type'}))", "**kwargs): super(AddBackdoorForm, self).__init__(*args, **kwargs) self.fields['campaign'].choices = [('', '')] + [", "required=False) description = forms.CharField(label=form_consts.Backdoor.DESCRIPTION, required=False) campaign = forms.ChoiceField(widget=forms.Select, label=form_consts.Backdoor.CAMPAIGN, required=False)" ]
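A hedged sketch of the clean() contract (it assumes a configured CRITs/Django deployment; the username and field values are hypothetical, and 'ExampleOrg' / 'Example Campaign' must be valid source and campaign choices in that deployment for the cross-field check to fire): binding a campaign without a confidence attaches an error to the confidence field.

form = AddBackdoorForm('analyst', data={
    'name': 'example-backdoor',
    'source': 'ExampleOrg',          # hypothetical; must be a valid source choice
    'campaign': 'Example Campaign',  # hypothetical; must be a valid campaign choice
    # 'confidence' deliberately omitted
})
if not form.is_valid():
    print(form.errors.get('confidence'))
    # expected: ['This field is required if campaign is specified.']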
[ "\"\"\"Shuts down parallel workers.\"\"\" for remote in self.remotes: remote.send((\"close\", None))", "dynamics = dynamic_fn_wrapper.var() dynamics.reset() mc = MonteCarloSingle(dynamics, horizon, trajectories) try:", "process crashes, we should not cause things to hang process.start()", "in range(self.trajectories): self.env.set_state(state) us = [self.env.action_space.sample() for _ in range(self.horizon)]", "in parallel.\"\"\" # This implementation is inspired by Baselines SubprocVecEnv.", "cmd == \"search\": best_u, best_r = mc.best_action(x) remote.send((best_u, best_r)) elif", "= Process(target=_worker, args=args) process.daemon = True # If the main", "NOTE: it must not be wrapped in a TimeLimit.\"\"\" if", "horizon, trajectories): \"\"\"Constructs a MonteCarlo instance for env. :param horizon:", "us: _ob, rew, done, _info = self.env.step(u) total_rew += rew", "[self.env.action_space.sample() for _ in range(self.horizon)] total_rew = 0 for u", "and the cumulative reward of the action sequences with the", "super().__init__(horizon, trajectories) self.env = env def seed(self, seed): \"\"\"Sets a", "ABC, abstractmethod from multiprocessing import Pipe, Process import gym from", "while True: state = env.get_state() a, _seq_rew = monte_carlo.best_action(state) ob,", "__init__(self, env, horizon, trajectories): \"\"\"See base class.\"\"\" super().__init__(horizon, trajectories) self.env", ":param monte_carlo(MonteCarlo): a Monte Carlo controller for env or a", "while True: cmd, x = remote.recv() if cmd == \"seed\":", "1) // nremotes + 1 pipes = [Pipe() for _", "for _ in range(self.horizon)] total_rew = 0 for u in", "mjData in consistent state def reset(self): \"\"\"See base class.\"\"\" return", "# put mjData in consistent state def reset(self): \"\"\"See base", "\"\"\"See base class.\"\"\" super().__init__(horizon, trajectories) self.env = env def seed(self,", "not cause things to hang process.start() self.ps.append(process) for remote in", "0 for u in us: _ob, rew, done, _info =", "in us: _ob, rew, done, _info = self.env.step(u) total_rew +=", "a ResettableEnv by random search. See base class for details.", "total_rew)) self.env.set_state(state) best = max(res, key=lambda x: x[1]) return best", "each action sequence is computed, starting from state. The function", "horizon, traj_per_worker) process = Process(target=_worker, args=args) process.daemon = True #", "self.ps: p.join() def receding_horizon(monte_carlo, env): \"\"\"Receding horizon control :param monte_carlo(MonteCarlo):", "cmd == \"seed\": mc.seed(x) elif cmd == \"search\": best_u, best_r", "i, (work_remote, remote, dynamic_fn) in enumerate(worker_cfgs): args = (work_remote, remote,", "ceiling of self.trajectories / nworkers traj_per_worker = (self.trajectories - 1)", "sequences. Generates self.trajectories action sequences, each of length self.horizon. The", "= horizon self.trajectories = trajectories @abstractmethod def seed(self, seed): \"\"\"Sets", "[Pipe() for _ in range(nremotes)] self.remotes, self.work_remotes = zip(*pipes) worker_cfgs", "the action sequences. :param seed (int): a seed.\"\"\" pass @abstractmethod", "or a clone of env. :param env(ResettableEnv): a resettable environment.\"\"\"", "to the nearest multiple of len(make_env).\"\"\" super().__init__(horizon, trajectories) nremotes =", "state): \"\"\"Returns the best action out of a random search", "reward of the action sequences with the largest cumulative reward.", "of action sequences. 
Generates self.trajectories action sequences, each of length", "best action out of a random search of action sequences.", "raise TypeError( \"Environment must not have a time limit \"", "are resettable.\"\"\" def __init__(self, env): \"\"\"Wraps a MujocoEnv, adding get_state", "This implementation is not parallelized.\"\"\" def __init__(self, env, horizon, trajectories):", "def reset(self): \"\"\"See base class.\"\"\" return self.env.reset() def step(self, a):", "See base class for details. This implementation is not parallelized.\"\"\"", "for env. :param horizon: the length of the trajectories to", "action sequences.\"\"\" for remote in self.remotes: remote.send((\"search\", state)) results =", "of a random search of action sequences. See base class", "self.sim = env.unwrapped.sim def get_state(self): \"\"\"Serializes the qpos and qvel", "to evaluate. It will be rounded up to the nearest", "env.step(a) yield a, ob, rew, done, info if done: break", "zip(*pipes) worker_cfgs = zip(self.work_remotes, self.remotes, env_fns) self.ps = [] for", "length of trajectories to search over. :param trajectories (int): minimum", "best def _worker(remote, parent_remote, dynamic_fn_wrapper, horizon, trajectories): parent_remote.close() dynamics =", "store configuration parameters. :param env_fns (list<()->ResettableEnv>): list of thunks. :param", "state: a value returned by env.get_state(). :return (action, reward): the", "pipes = [Pipe() for _ in range(nremotes)] self.remotes, self.work_remotes =", "the largest cumulative reward. :param state: a value returned by", "sequences of actions. Evaluates each trajectory in the environment, resetting", "action out of a random search of action sequences. See", "It will be rounded up to the nearest multiple of", "Carlo receding horizon control.\"\"\" from abc import ABC, abstractmethod from", "__init__(self, env_fns, horizon, trajectories, seed=0): \"\"\"Launch subprocess workers and store", "remote.recv() if cmd == \"seed\": mc.seed(x) elif cmd == \"search\":", "action sequences with the largest cumulative reward. :param state: a", "rew if done: break res.append((us[0], total_rew)) self.env.set_state(state) best = max(res,", "range(nremotes)] self.remotes, self.work_remotes = zip(*pipes) worker_cfgs = zip(self.work_remotes, self.remotes, env_fns)", "self.env.set_state(state) us = [self.env.action_space.sample() for _ in range(self.horizon)] total_rew =", "cumulative reward of the action sequences with the largest cumulative", "= MonteCarloSingle(dynamics, horizon, trajectories) try: while True: cmd, x =", "subprocess workers and store configuration parameters. :param env_fns (list<()->ResettableEnv>): list", "remote.send((\"seed\", seed + i)) def best_action(self, state): \"\"\"Returns the best", ":param seed (int): a seed.\"\"\" self.env.action_space.np_random.seed(seed) def best_action(self, state): \"\"\"Returns", "sequence is computed, starting from state. The function returns the", "action found and associated reward.\"\"\" pass class MonteCarloSingle(MonteCarlo): \"\"\"Selects an", "action sequences. Generates self.trajectories action sequences, each of length self.horizon.", "a MujocoEnv. 
NOTE: it must not be wrapped in a", "of len(make_env).\"\"\" super().__init__(horizon, trajectories) nremotes = len(env_fns) # Integer ceiling", "of the MuJoCo emulator.\"\"\" return MujocoState.from_mjdata(self.sim.data).flatten() def set_state(self, x): \"\"\"Restores", "seed.\"\"\" pass @abstractmethod def best_action(self, state): \"\"\"Returns the best action", "a resettable environment.\"\"\" while True: state = env.get_state() a, _seq_rew", "a MujocoEnv into a ResettableEnv. Note all MuJoCo environments are", "import CloudpickleWrapper from aprl.common.mujoco import MujocoState, ResettableEnv class MujocoResettableWrapper(ResettableEnv, gym.Wrapper):", "for remote in self.remotes] best = max(results, key=lambda x: x[1])", "parallel workers.\"\"\" for remote in self.remotes: remote.send((\"close\", None)) for p", "i, remote in enumerate(self.remotes): remote.send((\"seed\", seed + i)) def best_action(self,", "MonteCarloSingle(MonteCarlo): \"\"\"Selects an action for a ResettableEnv by random search.", "_seq_rew = monte_carlo.best_action(state) ob, rew, done, info = env.step(a) yield", "dynamic_fn) in enumerate(worker_cfgs): args = (work_remote, remote, CloudpickleWrapper(dynamic_fn), horizon, traj_per_worker)", "i)) def best_action(self, state): \"\"\"Returns the best action out of", "base class.\"\"\" return self.env.step(a) class MonteCarlo(ABC): \"\"\"Selects an action for", "Randomly samples fixed-length sequences of actions. Evaluates each trajectory in", "function returns the first action and the cumulative reward of", "the best action out of a random search of action", "MonteCarlo(ABC): \"\"\"Selects an action for a ResettableEnv by random search.", "\"\"\"Selects an action for a ResettableEnv by random search. See", "dynamic_fn_wrapper, horizon, trajectories): parent_remote.close() dynamics = dynamic_fn_wrapper.var() dynamics.reset() mc =", "search over. :param trajectories: the number of trajectories to evaluate.\"\"\"", "random search. See base class for details. This implementation is", "= 0 for u in us: _ob, rew, done, _info", "x[1]) return best def _worker(remote, parent_remote, dynamic_fn_wrapper, horizon, trajectories): parent_remote.close()", "a clone of env. :param env(ResettableEnv): a resettable environment.\"\"\" while", "set_state methods. :param env: a MujocoEnv. NOTE: it must not", "a seed.\"\"\" pass @abstractmethod def best_action(self, state): \"\"\"Returns the best", "(self.trajectories - 1) // nremotes + 1 pipes = [Pipe()", "process.start() self.ps.append(process) for remote in self.work_remotes: remote.close() def seed(self, seed):", "= [] for _ in range(self.trajectories): self.env.set_state(state) us = [self.env.action_space.sample()", "evaluate.\"\"\" self.horizon = horizon self.trajectories = trajectories @abstractmethod def seed(self,", "action sequence is computed, starting from state. The function returns", "monte_carlo.best_action(state) ob, rew, done, info = env.step(a) yield a, ob,", "of length self.horizon. The cumulative reward of each action sequence", "to the original after each trajectory.\"\"\" @abstractmethod def __init__(self, horizon,", "out of a random search of action sequences. 
See base", "a single environment, which is reset to state before evaluating", "nearest multiple of len(make_env).\"\"\" super().__init__(horizon, trajectories) nremotes = len(env_fns) #", "env_fns) self.ps = [] for i, (work_remote, remote, dynamic_fn) in", "Process(target=_worker, args=args) process.daemon = True # If the main process", "return self.env.step(a) class MonteCarlo(ABC): \"\"\"Selects an action for a ResettableEnv", "stable_baselines.common.vec_env import CloudpickleWrapper from aprl.common.mujoco import MujocoState, ResettableEnv class MujocoResettableWrapper(ResettableEnv,", "self.env = env def seed(self, seed): \"\"\"Sets a seed for", "is inspired by Baselines SubprocVecEnv. def __init__(self, env_fns, horizon, trajectories,", "the length of the trajectories to search over. :param trajectories:", "evaluate. It will be rounded up to the nearest multiple", "search of action sequences.\"\"\" for remote in self.remotes: remote.send((\"search\", state))", "of the action sequences with the largest cumulative reward. :param", "= env.get_state() a, _seq_rew = monte_carlo.best_action(state) ob, rew, done, info", "qvel state of the MuJoCo emulator.\"\"\" return MujocoState.from_mjdata(self.sim.data).flatten() def set_state(self,", "gym.Wrapper): \"\"\"Converts a MujocoEnv into a ResettableEnv. Note all MuJoCo", "main process crashes, we should not cause things to hang", "\"\"\"Serializes the qpos and qvel state of the MuJoCo emulator.\"\"\"", "class.\"\"\" return self.env.step(a) class MonteCarlo(ABC): \"\"\"Selects an action for a", "best = max(results, key=lambda x: x[1]) return best def close(self):", "NotImplementedError except KeyboardInterrupt: print(\"MonteCarloParallel worker: got KeyboardInterrupt\") finally: dynamics.close() class", "starting from state. The function returns the first action and", "and set_state methods. :param env: a MujocoEnv. NOTE: it must", "ResettableEnv by random search. See base class for details. This", "self.trajectories action sequences, each of length self.horizon. The cumulative reward", "reward.\"\"\" pass class MonteCarloSingle(MonteCarlo): \"\"\"Selects an action for a ResettableEnv", "This implementation is inspired by Baselines SubprocVecEnv. def __init__(self, env_fns,", "_ in range(self.horizon)] total_rew = 0 for u in us:", "derive other values.\"\"\" state = MujocoState.from_flattened(x, self.sim) state.set_mjdata(self.sim.data) self.sim.forward() #", "env def seed(self, seed): \"\"\"Sets a seed for the PRNG", "trajectories): \"\"\"See base class.\"\"\" super().__init__(horizon, trajectories) self.env = env def", "a): \"\"\"See base class.\"\"\" return self.env.step(a) class MonteCarlo(ABC): \"\"\"Selects an", "each of length self.horizon. The cumulative reward of each action", "MuJoCo environments are resettable.\"\"\" def __init__(self, env): \"\"\"Wraps a MujocoEnv,", "res = [] for _ in range(self.trajectories): self.env.set_state(state) us =", "cause things to hang process.start() self.ps.append(process) for remote in self.work_remotes:", "PRNG for the action sequences. 
:param seed (int): a seed.\"\"\"", "resettable environment.\"\"\" while True: state = env.get_state() a, _seq_rew =", "True: cmd, x = remote.recv() if cmd == \"seed\": mc.seed(x)", "is reset to state before evaluating each action sequence.\"\"\" res", "except KeyboardInterrupt: print(\"MonteCarloParallel worker: got KeyboardInterrupt\") finally: dynamics.close() class MonteCarloParallel(MonteCarlo):", "process.daemon = True # If the main process crashes, we", "MonteCarlo instance for env. :param horizon: the length of the", "a random search of action sequences. See base class for", "def __init__(self, env_fns, horizon, trajectories, seed=0): \"\"\"Launch subprocess workers and", "limit \" \"(try passing in env.unwrapped instead).\" ) gym.Wrapper.__init__(self, env)", "env or a clone of env. :param env(ResettableEnv): a resettable", "each trajectory.\"\"\" @abstractmethod def __init__(self, horizon, trajectories): \"\"\"Constructs a MonteCarlo", "base class for details. This implementation is not parallelized.\"\"\" def", "for details. This implementation is not parallelized.\"\"\" def __init__(self, env,", "res.append((us[0], total_rew)) self.env.set_state(state) best = max(res, key=lambda x: x[1]) return", "out of a random search of action sequences. Generates self.trajectories", "associated reward.\"\"\" pass class MonteCarloSingle(MonteCarlo): \"\"\"Selects an action for a", "total_rew += rew if done: break res.append((us[0], total_rew)) self.env.set_state(state) best", "for remote in self.remotes: remote.send((\"search\", state)) results = [remote.recv() for", "def best_action(self, state): \"\"\"Returns the best action out of a", "sequences.\"\"\" for remote in self.remotes: remote.send((\"search\", state)) results = [remote.recv()", "\"\"\"Receding horizon control :param monte_carlo(MonteCarlo): a Monte Carlo controller for", "from aprl.common.mujoco import MujocoState, ResettableEnv class MujocoResettableWrapper(ResettableEnv, gym.Wrapper): \"\"\"Converts a", "self.work_remotes = zip(*pipes) worker_cfgs = zip(self.work_remotes, self.remotes, env_fns) self.ps =", "MujocoState.from_flattened(x, self.sim) state.set_mjdata(self.sim.data) self.sim.forward() # put mjData in consistent state", "in env.unwrapped instead).\" ) gym.Wrapper.__init__(self, env) self.sim = env.unwrapped.sim def", "def seed(self, seed): \"\"\"Sets a seed for the PRNG for", "set_state(self, x): \"\"\"Restores qpos and qvel, calling forward() to derive", "_info = self.env.step(u) total_rew += rew if done: break res.append((us[0],", "seed): \"\"\"See base class.\"\"\" for i, remote in enumerate(self.remotes): remote.send((\"seed\",", "= (self.trajectories - 1) // nremotes + 1 pipes =", "TimeLimit.\"\"\" if hasattr(env, \"_max_episode_steps\"): raise TypeError( \"Environment must not have", "\"\"\"Constructs a MonteCarlo instance for env. :param horizon: the length", "seed(self, seed): \"\"\"Sets a seed for the PRNG for the", "of trajectories to search over. :param trajectories (int): minimum number", "self.ps.append(process) for remote in self.work_remotes: remote.close() def seed(self, seed): \"\"\"See", "= len(env_fns) # Integer ceiling of self.trajectories / nworkers traj_per_worker", "random search in parallel.\"\"\" # This implementation is inspired by", "env.unwrapped.sim def get_state(self): \"\"\"Serializes the qpos and qvel state of", "random search of action sequences. 
See base class for details.", "Process import gym from stable_baselines.common.vec_env import CloudpickleWrapper from aprl.common.mujoco import", "trajectories (int): minimum number of trajectories to evaluate. It will", "sequences, each of length self.horizon. The cumulative reward of each", "the MuJoCo emulator.\"\"\" return MujocoState.from_mjdata(self.sim.data).flatten() def set_state(self, x): \"\"\"Restores qpos", "sequences with the largest cumulative reward. :param state: a value", "MujocoResettableWrapper(ResettableEnv, gym.Wrapper): \"\"\"Converts a MujocoEnv into a ResettableEnv. Note all", "# This implementation is inspired by Baselines SubprocVecEnv. def __init__(self,", "KeyboardInterrupt: print(\"MonteCarloParallel worker: got KeyboardInterrupt\") finally: dynamics.close() class MonteCarloParallel(MonteCarlo): \"\"\"Like", "done, info = env.step(a) yield a, ob, rew, done, info", "\"\"\"Wraps a MujocoEnv, adding get_state and set_state methods. :param env:", "a MujocoEnv, adding get_state and set_state methods. :param env: a", "import ABC, abstractmethod from multiprocessing import Pipe, Process import gym", "a random search of action sequences.\"\"\" for remote in self.remotes:", "for env or a clone of env. :param env(ResettableEnv): a", "by env.get_state(). :return (action, reward): the best action found and", "state.set_mjdata(self.sim.data) self.sim.forward() # put mjData in consistent state def reset(self):", "of a random search of action sequences.\"\"\" for remote in", "instance for env. :param horizon: the length of the trajectories", "zip(self.work_remotes, self.remotes, env_fns) self.ps = [] for i, (work_remote, remote,", "Note all MuJoCo environments are resettable.\"\"\" def __init__(self, env): \"\"\"Wraps", "self.horizon. The cumulative reward of each action sequence is computed,", "def _worker(remote, parent_remote, dynamic_fn_wrapper, horizon, trajectories): parent_remote.close() dynamics = dynamic_fn_wrapper.var()", "__init__(self, horizon, trajectories): \"\"\"Constructs a MonteCarlo instance for env. :param", ":param seed (int): a seed.\"\"\" pass @abstractmethod def best_action(self, state):", "\"search\": best_u, best_r = mc.best_action(x) remote.send((best_u, best_r)) elif cmd ==", "not be wrapped in a TimeLimit.\"\"\" if hasattr(env, \"_max_episode_steps\"): raise", "minimum number of trajectories to evaluate. It will be rounded", "(int): a seed.\"\"\" pass @abstractmethod def best_action(self, state): \"\"\"Returns the", "max(results, key=lambda x: x[1]) return best def close(self): \"\"\"Shuts down", "try: while True: cmd, x = remote.recv() if cmd ==", "from multiprocessing import Pipe, Process import gym from stable_baselines.common.vec_env import", "best_r = mc.best_action(x) remote.send((best_u, best_r)) elif cmd == \"close\": remote.close()", "abstractmethod from multiprocessing import Pipe, Process import gym from stable_baselines.common.vec_env", "env) self.sim = env.unwrapped.sim def get_state(self): \"\"\"Serializes the qpos and", "returned by env.get_state(). :return (action, reward): the best action found", "break else: raise NotImplementedError except KeyboardInterrupt: print(\"MonteCarloParallel worker: got KeyboardInterrupt\")", "nremotes + 1 pipes = [Pipe() for _ in range(nremotes)]", "mc.best_action(x) remote.send((best_u, best_r)) elif cmd == \"close\": remote.close() break else:", "to state before evaluating each action sequence.\"\"\" res = []", "parameters. :param env_fns (list<()->ResettableEnv>): list of thunks. 
:param horizon (int):", "of a random search of action sequences. Generates self.trajectories action", "MuJoCo emulator.\"\"\" return MujocoState.from_mjdata(self.sim.data).flatten() def set_state(self, x): \"\"\"Restores qpos and", "\"\"\"Selects an action for a ResettableEnv by random search. Randomly", "\"(try passing in env.unwrapped instead).\" ) gym.Wrapper.__init__(self, env) self.sim =", "not have a time limit \" \"(try passing in env.unwrapped", "trajectories to evaluate.\"\"\" self.horizon = horizon self.trajectories = trajectories @abstractmethod", "\"\"\"Converts a MujocoEnv into a ResettableEnv. Note all MuJoCo environments", "If the main process crashes, we should not cause things", "x: x[1]) return best def _worker(remote, parent_remote, dynamic_fn_wrapper, horizon, trajectories):", "in a single environment, which is reset to state before", "self.env.step(u) total_rew += rew if done: break res.append((us[0], total_rew)) self.env.set_state(state)", ":param horizon (int): length of trajectories to search over. :param", "ob, rew, done, info = env.step(a) yield a, ob, rew,", "but performs the random search in parallel.\"\"\" # This implementation", "traj_per_worker = (self.trajectories - 1) // nremotes + 1 pipes", "is not parallelized.\"\"\" def __init__(self, env, horizon, trajectories): \"\"\"See base", "Pipe, Process import gym from stable_baselines.common.vec_env import CloudpickleWrapper from aprl.common.mujoco", "must not be wrapped in a TimeLimit.\"\"\" if hasattr(env, \"_max_episode_steps\"):", "in self.ps: p.join() def receding_horizon(monte_carlo, env): \"\"\"Receding horizon control :param", "takes place in a single environment, which is reset to", "adding get_state and set_state methods. :param env: a MujocoEnv. NOTE:", "found and associated reward.\"\"\" pass class MonteCarloSingle(MonteCarlo): \"\"\"Selects an action", "= zip(self.work_remotes, self.remotes, env_fns) self.ps = [] for i, (work_remote,", "Evaluates each trajectory in the environment, resetting the state to", "receding_horizon(monte_carlo, env): \"\"\"Receding horizon control :param monte_carlo(MonteCarlo): a Monte Carlo", "from abc import ABC, abstractmethod from multiprocessing import Pipe, Process", "clone of env. :param env(ResettableEnv): a resettable environment.\"\"\" while True:", "def __init__(self, horizon, trajectories): \"\"\"Constructs a MonteCarlo instance for env.", "raise NotImplementedError except KeyboardInterrupt: print(\"MonteCarloParallel worker: got KeyboardInterrupt\") finally: dynamics.close()", "trajectories to search over. :param trajectories: the number of trajectories", "# If the main process crashes, we should not cause", "to derive other values.\"\"\" state = MujocoState.from_flattened(x, self.sim) state.set_mjdata(self.sim.data) self.sim.forward()", "= mc.best_action(x) remote.send((best_u, best_r)) elif cmd == \"close\": remote.close() break", "args = (work_remote, remote, CloudpickleWrapper(dynamic_fn), horizon, traj_per_worker) process = Process(target=_worker,", "a ResettableEnv by random search. Randomly samples fixed-length sequences of", "in consistent state def reset(self): \"\"\"See base class.\"\"\" return self.env.reset()", "key=lambda x: x[1]) return best def close(self): \"\"\"Shuts down parallel", "cumulative reward. :param state: a value returned by env.get_state(). :return", "seed=0): \"\"\"Launch subprocess workers and store configuration parameters. :param env_fns", "over. 
:param trajectories (int): minimum number of trajectories to evaluate.", "horizon control :param monte_carlo(MonteCarlo): a Monte Carlo controller for env", "of trajectories to evaluate.\"\"\" self.horizon = horizon self.trajectories = trajectories", "for p in self.ps: p.join() def receding_horizon(monte_carlo, env): \"\"\"Receding horizon", "an action for a ResettableEnv by random search. Randomly samples", "finally: dynamics.close() class MonteCarloParallel(MonteCarlo): \"\"\"Like MonteCarlo, but performs the random", "hasattr(env, \"_max_episode_steps\"): raise TypeError( \"Environment must not have a time", "the number of trajectories to evaluate.\"\"\" self.horizon = horizon self.trajectories", "by random search. See base class for details. This implementation", "\"seed\": mc.seed(x) elif cmd == \"search\": best_u, best_r = mc.best_action(x)", "env. :param env(ResettableEnv): a resettable environment.\"\"\" while True: state =", "qvel, calling forward() to derive other values.\"\"\" state = MujocoState.from_flattened(x,", "random search. Randomly samples fixed-length sequences of actions. Evaluates each", "length self.horizon. The cumulative reward of each action sequence is", ":return (action, reward): the best action found and associated reward.\"\"\"", "\"\"\"Restores qpos and qvel, calling forward() to derive other values.\"\"\"", "it must not be wrapped in a TimeLimit.\"\"\" if hasattr(env,", "class.\"\"\" return self.env.reset() def step(self, a): \"\"\"See base class.\"\"\" return", "the environment, resetting the state to the original after each", "implementation is not parallelized.\"\"\" def __init__(self, env, horizon, trajectories): \"\"\"See", "nremotes = len(env_fns) # Integer ceiling of self.trajectories / nworkers", "class.\"\"\" for i, remote in enumerate(self.remotes): remote.send((\"seed\", seed + i))", "p.join() def receding_horizon(monte_carlo, env): \"\"\"Receding horizon control :param monte_carlo(MonteCarlo): a", "x = remote.recv() if cmd == \"seed\": mc.seed(x) elif cmd", "base class.\"\"\" return self.env.reset() def step(self, a): \"\"\"See base class.\"\"\"", "trajectories) try: while True: cmd, x = remote.recv() if cmd", "cmd == \"close\": remote.close() break else: raise NotImplementedError except KeyboardInterrupt:", "dynamic_fn_wrapper.var() dynamics.reset() mc = MonteCarloSingle(dynamics, horizon, trajectories) try: while True:", "== \"search\": best_u, best_r = mc.best_action(x) remote.send((best_u, best_r)) elif cmd", "in self.work_remotes: remote.close() def seed(self, seed): \"\"\"See base class.\"\"\" for", "self.sim) state.set_mjdata(self.sim.data) self.sim.forward() # put mjData in consistent state def", "\"\"\"Launch subprocess workers and store configuration parameters. 
:param env_fns (list<()->ResettableEnv>):", "p in self.ps: p.join() def receding_horizon(monte_carlo, env): \"\"\"Receding horizon control", "Monte Carlo controller for env or a clone of env.", "@abstractmethod def best_action(self, state): \"\"\"Returns the best action out of", "dynamics.close() class MonteCarloParallel(MonteCarlo): \"\"\"Like MonteCarlo, but performs the random search", "workers.\"\"\" for remote in self.remotes: remote.send((\"close\", None)) for p in", "None)) for p in self.ps: p.join() def receding_horizon(monte_carlo, env): \"\"\"Receding", "emulator.\"\"\" return MujocoState.from_mjdata(self.sim.data).flatten() def set_state(self, x): \"\"\"Restores qpos and qvel,", "def get_state(self): \"\"\"Serializes the qpos and qvel state of the", "state to the original after each trajectory.\"\"\" @abstractmethod def __init__(self,", "key=lambda x: x[1]) return best def _worker(remote, parent_remote, dynamic_fn_wrapper, horizon,", "of thunks. :param horizon (int): length of trajectories to search", "action sequence.\"\"\" res = [] for _ in range(self.trajectories): self.env.set_state(state)", "env: a MujocoEnv. NOTE: it must not be wrapped in", "x: x[1]) return best def close(self): \"\"\"Shuts down parallel workers.\"\"\"", "resetting the state to the original after each trajectory.\"\"\" @abstractmethod", "a ResettableEnv. Note all MuJoCo environments are resettable.\"\"\" def __init__(self,", "other values.\"\"\" state = MujocoState.from_flattened(x, self.sim) state.set_mjdata(self.sim.data) self.sim.forward() # put", "for the action sequences. :param seed (int): a seed.\"\"\" self.env.action_space.np_random.seed(seed)", "seed for the PRNG for the action sequences. :param seed", "def set_state(self, x): \"\"\"Restores qpos and qvel, calling forward() to", "from state. The function returns the first action and the", "action sequences. :param seed (int): a seed.\"\"\" pass @abstractmethod def", "trajectories) self.env = env def seed(self, seed): \"\"\"Sets a seed", "= max(res, key=lambda x: x[1]) return best def _worker(remote, parent_remote,", "resettable.\"\"\" def __init__(self, env): \"\"\"Wraps a MujocoEnv, adding get_state and", "is computed, starting from state. The function returns the first", "self.env.reset() def step(self, a): \"\"\"See base class.\"\"\" return self.env.step(a) class", "not parallelized.\"\"\" def __init__(self, env, horizon, trajectories): \"\"\"See base class.\"\"\"", "mc.seed(x) elif cmd == \"search\": best_u, best_r = mc.best_action(x) remote.send((best_u,", "+ 1 pipes = [Pipe() for _ in range(nremotes)] self.remotes,", "= zip(*pipes) worker_cfgs = zip(self.work_remotes, self.remotes, env_fns) self.ps = []", "nworkers traj_per_worker = (self.trajectories - 1) // nremotes + 1", "for a ResettableEnv by random search. Randomly samples fixed-length sequences", "for the action sequences. :param seed (int): a seed.\"\"\" pass", "= monte_carlo.best_action(state) ob, rew, done, info = env.step(a) yield a,", "for a ResettableEnv by random search. 
See base class for", "__init__(self, env): \"\"\"Wraps a MujocoEnv, adding get_state and set_state methods.", "return MujocoState.from_mjdata(self.sim.data).flatten() def set_state(self, x): \"\"\"Restores qpos and qvel, calling", "The cumulative reward of each action sequence is computed, starting", "_worker(remote, parent_remote, dynamic_fn_wrapper, horizon, trajectories): parent_remote.close() dynamics = dynamic_fn_wrapper.var() dynamics.reset()", "the state to the original after each trajectory.\"\"\" @abstractmethod def", "remote.close() break else: raise NotImplementedError except KeyboardInterrupt: print(\"MonteCarloParallel worker: got", "reward. :param state: a value returned by env.get_state(). :return (action,", "x[1]) return best def close(self): \"\"\"Shuts down parallel workers.\"\"\" for", "before evaluating each action sequence.\"\"\" res = [] for _", "_ob, rew, done, _info = self.env.step(u) total_rew += rew if", "self.work_remotes: remote.close() def seed(self, seed): \"\"\"See base class.\"\"\" for i,", "environment.\"\"\" while True: state = env.get_state() a, _seq_rew = monte_carlo.best_action(state)", "reward): the best action found and associated reward.\"\"\" pass class", "self.remotes: remote.send((\"search\", state)) results = [remote.recv() for remote in self.remotes]", "qpos and qvel state of the MuJoCo emulator.\"\"\" return MujocoState.from_mjdata(self.sim.data).flatten()", "remote.close() def seed(self, seed): \"\"\"See base class.\"\"\" for i, remote", "(work_remote, remote, CloudpickleWrapper(dynamic_fn), horizon, traj_per_worker) process = Process(target=_worker, args=args) process.daemon", "if hasattr(env, \"_max_episode_steps\"): raise TypeError( \"Environment must not have a", "(int): a seed.\"\"\" self.env.action_space.np_random.seed(seed) def best_action(self, state): \"\"\"Returns the best", "a, _seq_rew = monte_carlo.best_action(state) ob, rew, done, info = env.step(a)", "over. :param trajectories: the number of trajectories to evaluate.\"\"\" self.horizon", "def receding_horizon(monte_carlo, env): \"\"\"Receding horizon control :param monte_carlo(MonteCarlo): a Monte", "which is reset to state before evaluating each action sequence.\"\"\"", "to search over. :param trajectories (int): minimum number of trajectories", "env. :param horizon: the length of the trajectories to search", "for i, (work_remote, remote, dynamic_fn) in enumerate(worker_cfgs): args = (work_remote,", "sequences. :param seed (int): a seed.\"\"\" self.env.action_space.np_random.seed(seed) def best_action(self, state):", "state before evaluating each action sequence.\"\"\" res = [] for", "state of the MuJoCo emulator.\"\"\" return MujocoState.from_mjdata(self.sim.data).flatten() def set_state(self, x):", "// nremotes + 1 pipes = [Pipe() for _ in", "remote.send((\"search\", state)) results = [remote.recv() for remote in self.remotes] best", "in self.remotes: remote.send((\"search\", state)) results = [remote.recv() for remote in", ":param horizon: the length of the trajectories to search over.", "def close(self): \"\"\"Shuts down parallel workers.\"\"\" for remote in self.remotes:", "enumerate(self.remotes): remote.send((\"seed\", seed + i)) def best_action(self, state): \"\"\"Returns the", "args=args) process.daemon = True # If the main process crashes,", "MonteCarloParallel(MonteCarlo): \"\"\"Like MonteCarlo, but performs the random search in parallel.\"\"\"", "thunks. 
:param horizon (int): length of trajectories to search over.", "from stable_baselines.common.vec_env import CloudpickleWrapper from aprl.common.mujoco import MujocoState, ResettableEnv class", "ResettableEnv. Note all MuJoCo environments are resettable.\"\"\" def __init__(self, env):", "close(self): \"\"\"Shuts down parallel workers.\"\"\" for remote in self.remotes: remote.send((\"close\",", "remote, CloudpickleWrapper(dynamic_fn), horizon, traj_per_worker) process = Process(target=_worker, args=args) process.daemon =", "= max(results, key=lambda x: x[1]) return best def close(self): \"\"\"Shuts", "in self.remotes: remote.send((\"close\", None)) for p in self.ps: p.join() def", "number of trajectories to evaluate. It will be rounded up", "the first action and the cumulative reward of the action", "aprl.common.mujoco import MujocoState, ResettableEnv class MujocoResettableWrapper(ResettableEnv, gym.Wrapper): \"\"\"Converts a MujocoEnv", "super().__init__(horizon, trajectories) nremotes = len(env_fns) # Integer ceiling of self.trajectories", "\"Environment must not have a time limit \" \"(try passing", "if done: break res.append((us[0], total_rew)) self.env.set_state(state) best = max(res, key=lambda", "the main process crashes, we should not cause things to", "(int): length of trajectories to search over. :param trajectories (int):", "\"\"\"Monte Carlo receding horizon control.\"\"\" from abc import ABC, abstractmethod", "multiple of len(make_env).\"\"\" super().__init__(horizon, trajectories) nremotes = len(env_fns) # Integer", "to hang process.start() self.ps.append(process) for remote in self.work_remotes: remote.close() def", "Carlo controller for env or a clone of env. :param", "gym.Wrapper.__init__(self, env) self.sim = env.unwrapped.sim def get_state(self): \"\"\"Serializes the qpos", "class MonteCarloParallel(MonteCarlo): \"\"\"Like MonteCarlo, but performs the random search in", "= env.step(a) yield a, ob, rew, done, info if done:", "in a TimeLimit.\"\"\" if hasattr(env, \"_max_episode_steps\"): raise TypeError( \"Environment must", "time limit \" \"(try passing in env.unwrapped instead).\" ) gym.Wrapper.__init__(self,", "[remote.recv() for remote in self.remotes] best = max(results, key=lambda x:", "the trajectories to search over. :param trajectories: the number of", "have a time limit \" \"(try passing in env.unwrapped instead).\"", "base class.\"\"\" super().__init__(horizon, trajectories) self.env = env def seed(self, seed):", "horizon, trajectories): \"\"\"See base class.\"\"\" super().__init__(horizon, trajectories) self.env = env", "control.\"\"\" from abc import ABC, abstractmethod from multiprocessing import Pipe,", "class for details. Search takes place in a single environment,", "= trajectories @abstractmethod def seed(self, seed): \"\"\"Sets a seed for", "u in us: _ob, rew, done, _info = self.env.step(u) total_rew", "parallel.\"\"\" # This implementation is inspired by Baselines SubprocVecEnv. def", "self.remotes] best = max(results, key=lambda x: x[1]) return best def", "down parallel workers.\"\"\" for remote in self.remotes: remote.send((\"close\", None)) for", "MonteCarlo, but performs the random search in parallel.\"\"\" # This", "KeyboardInterrupt\") finally: dynamics.close() class MonteCarloParallel(MonteCarlo): \"\"\"Like MonteCarlo, but performs the", "length of the trajectories to search over. :param trajectories: the", "performs the random search in parallel.\"\"\" # This implementation is", "into a ResettableEnv. 
Note all MuJoCo environments are resettable.\"\"\" def", "self.env.step(a) class MonteCarlo(ABC): \"\"\"Selects an action for a ResettableEnv by", "@abstractmethod def seed(self, seed): \"\"\"Sets a seed for the PRNG", "horizon, trajectories) try: while True: cmd, x = remote.recv() if", "\"close\": remote.close() break else: raise NotImplementedError except KeyboardInterrupt: print(\"MonteCarloParallel worker:", "state = env.get_state() a, _seq_rew = monte_carlo.best_action(state) ob, rew, done,", "= [self.env.action_space.sample() for _ in range(self.horizon)] total_rew = 0 for", "step(self, a): \"\"\"See base class.\"\"\" return self.env.step(a) class MonteCarlo(ABC): \"\"\"Selects", "seed + i)) def best_action(self, state): \"\"\"Returns the best action", "best action found and associated reward.\"\"\" pass class MonteCarloSingle(MonteCarlo): \"\"\"Selects", "of trajectories to evaluate. It will be rounded up to", "elif cmd == \"search\": best_u, best_r = mc.best_action(x) remote.send((best_u, best_r))", "_ in range(nremotes)] self.remotes, self.work_remotes = zip(*pipes) worker_cfgs = zip(self.work_remotes,", "a MonteCarlo instance for env. :param horizon: the length of", "= [remote.recv() for remote in self.remotes] best = max(results, key=lambda", "env, horizon, trajectories): \"\"\"See base class.\"\"\" super().__init__(horizon, trajectories) self.env =", ":param env_fns (list<()->ResettableEnv>): list of thunks. :param horizon (int): length", "horizon (int): length of trajectories to search over. :param trajectories", "reset to state before evaluating each action sequence.\"\"\" res =", "= self.env.step(u) total_rew += rew if done: break res.append((us[0], total_rew))", "random search of action sequences.\"\"\" for remote in self.remotes: remote.send((\"search\",", "process = Process(target=_worker, args=args) process.daemon = True # If the", "traj_per_worker) process = Process(target=_worker, args=args) process.daemon = True # If", "len(make_env).\"\"\" super().__init__(horizon, trajectories) nremotes = len(env_fns) # Integer ceiling of", "each trajectory in the environment, resetting the state to the", "state)) results = [remote.recv() for remote in self.remotes] best =", "@abstractmethod def __init__(self, horizon, trajectories): \"\"\"Constructs a MonteCarlo instance for", "enumerate(worker_cfgs): args = (work_remote, remote, CloudpickleWrapper(dynamic_fn), horizon, traj_per_worker) process =", "for _ in range(self.trajectories): self.env.set_state(state) us = [self.env.action_space.sample() for _", "[] for _ in range(self.trajectories): self.env.set_state(state) us = [self.env.action_space.sample() for", "results = [remote.recv() for remote in self.remotes] best = max(results,", "wrapped in a TimeLimit.\"\"\" if hasattr(env, \"_max_episode_steps\"): raise TypeError( \"Environment", "\"\"\"See base class.\"\"\" return self.env.step(a) class MonteCarlo(ABC): \"\"\"Selects an action", "for u in us: _ob, rew, done, _info = self.env.step(u)", "best_u, best_r = mc.best_action(x) remote.send((best_u, best_r)) elif cmd == \"close\":", "cumulative reward of each action sequence is computed, starting from", "= True # If the main process crashes, we should", "value returned by env.get_state(). :return (action, reward): the best action", "= MujocoState.from_flattened(x, self.sim) state.set_mjdata(self.sim.data) self.sim.forward() # put mjData in consistent", "inspired by Baselines SubprocVecEnv. 
def __init__(self, env_fns, horizon, trajectories, seed=0):", "\"\"\"See base class.\"\"\" return self.env.reset() def step(self, a): \"\"\"See base", "search of action sequences. Generates self.trajectories action sequences, each of", "return best def close(self): \"\"\"Shuts down parallel workers.\"\"\" for remote", "a random search of action sequences. Generates self.trajectories action sequences,", "MonteCarloSingle(dynamics, horizon, trajectories) try: while True: cmd, x = remote.recv()", "_ in range(self.trajectories): self.env.set_state(state) us = [self.env.action_space.sample() for _ in", "after each trajectory.\"\"\" @abstractmethod def __init__(self, horizon, trajectories): \"\"\"Constructs a", "Baselines SubprocVecEnv. def __init__(self, env_fns, horizon, trajectories, seed=0): \"\"\"Launch subprocess", "the random search in parallel.\"\"\" # This implementation is inspired", "remote.send((best_u, best_r)) elif cmd == \"close\": remote.close() break else: raise", "True: state = env.get_state() a, _seq_rew = monte_carlo.best_action(state) ob, rew,", "for i, remote in enumerate(self.remotes): remote.send((\"seed\", seed + i)) def", "the action sequences with the largest cumulative reward. :param state:", "to evaluate.\"\"\" self.horizon = horizon self.trajectories = trajectories @abstractmethod def", "def seed(self, seed): \"\"\"See base class.\"\"\" for i, remote in", "action sequences. See base class for details. Search takes place", "info = env.step(a) yield a, ob, rew, done, info if", "= [] for i, (work_remote, remote, dynamic_fn) in enumerate(worker_cfgs): args", "search in parallel.\"\"\" # This implementation is inspired by Baselines", "state. The function returns the first action and the cumulative", "1 pipes = [Pipe() for _ in range(nremotes)] self.remotes, self.work_remotes", "environment, which is reset to state before evaluating each action", "for remote in self.work_remotes: remote.close() def seed(self, seed): \"\"\"See base", "best_action(self, state): \"\"\"Returns the best action out of a random", "self.trajectories / nworkers traj_per_worker = (self.trajectories - 1) // nremotes", "import MujocoState, ResettableEnv class MujocoResettableWrapper(ResettableEnv, gym.Wrapper): \"\"\"Converts a MujocoEnv into", "remote, dynamic_fn) in enumerate(worker_cfgs): args = (work_remote, remote, CloudpickleWrapper(dynamic_fn), horizon,", "implementation is inspired by Baselines SubprocVecEnv. def __init__(self, env_fns, horizon,", "action out of a random search of action sequences.\"\"\" for", "be rounded up to the nearest multiple of len(make_env).\"\"\" super().__init__(horizon,", "rew, done, _info = self.env.step(u) total_rew += rew if done:", "= [Pipe() for _ in range(nremotes)] self.remotes, self.work_remotes = zip(*pipes)", "= env.unwrapped.sim def get_state(self): \"\"\"Serializes the qpos and qvel state", "values.\"\"\" state = MujocoState.from_flattened(x, self.sim) state.set_mjdata(self.sim.data) self.sim.forward() # put mjData", "configuration parameters. :param env_fns (list<()->ResettableEnv>): list of thunks. :param horizon", "for details. Search takes place in a single environment, which", "in enumerate(self.remotes): remote.send((\"seed\", seed + i)) def best_action(self, state): \"\"\"Returns", "largest cumulative reward. 
:param state: a value returned by env.get_state().", "done: break res.append((us[0], total_rew)) self.env.set_state(state) best = max(res, key=lambda x:", "env_fns, horizon, trajectories, seed=0): \"\"\"Launch subprocess workers and store configuration", "# Integer ceiling of self.trajectories / nworkers traj_per_worker = (self.trajectories", "MujocoEnv. NOTE: it must not be wrapped in a TimeLimit.\"\"\"", "worker_cfgs = zip(self.work_remotes, self.remotes, env_fns) self.ps = [] for i,", "be wrapped in a TimeLimit.\"\"\" if hasattr(env, \"_max_episode_steps\"): raise TypeError(", "pass @abstractmethod def best_action(self, state): \"\"\"Returns the best action out", "best_r)) elif cmd == \"close\": remote.close() break else: raise NotImplementedError", "of the trajectories to search over. :param trajectories: the number", "horizon self.trajectories = trajectories @abstractmethod def seed(self, seed): \"\"\"Sets a", "- 1) // nremotes + 1 pipes = [Pipe() for", "each action sequence.\"\"\" res = [] for _ in range(self.trajectories):", "trajectories @abstractmethod def seed(self, seed): \"\"\"Sets a seed for the", "action sequences. :param seed (int): a seed.\"\"\" self.env.action_space.np_random.seed(seed) def best_action(self,", "got KeyboardInterrupt\") finally: dynamics.close() class MonteCarloParallel(MonteCarlo): \"\"\"Like MonteCarlo, but performs", "best = max(res, key=lambda x: x[1]) return best def _worker(remote,", "action and the cumulative reward of the action sequences with", "print(\"MonteCarloParallel worker: got KeyboardInterrupt\") finally: dynamics.close() class MonteCarloParallel(MonteCarlo): \"\"\"Like MonteCarlo,", "\" \"(try passing in env.unwrapped instead).\" ) gym.Wrapper.__init__(self, env) self.sim", "random search of action sequences. Generates self.trajectories action sequences, each", "control :param monte_carlo(MonteCarlo): a Monte Carlo controller for env or", "remote in self.remotes: remote.send((\"search\", state)) results = [remote.recv() for remote", "of env. :param env(ResettableEnv): a resettable environment.\"\"\" while True: state", "range(self.trajectories): self.env.set_state(state) us = [self.env.action_space.sample() for _ in range(self.horizon)] total_rew", "search over. :param trajectories (int): minimum number of trajectories to", "trajectory.\"\"\" @abstractmethod def __init__(self, horizon, trajectories): \"\"\"Constructs a MonteCarlo instance", "x): \"\"\"Restores qpos and qvel, calling forward() to derive other", "break res.append((us[0], total_rew)) self.env.set_state(state) best = max(res, key=lambda x: x[1])", "(list<()->ResettableEnv>): list of thunks. :param horizon (int): length of trajectories", "computed, starting from state. The function returns the first action", "trajectories) nremotes = len(env_fns) # Integer ceiling of self.trajectories /", "search of action sequences. See base class for details. Search", "self.ps = [] for i, (work_remote, remote, dynamic_fn) in enumerate(worker_cfgs):", "env.unwrapped instead).\" ) gym.Wrapper.__init__(self, env) self.sim = env.unwrapped.sim def get_state(self):", "put mjData in consistent state def reset(self): \"\"\"See base class.\"\"\"", "= remote.recv() if cmd == \"seed\": mc.seed(x) elif cmd ==", "with the largest cumulative reward. :param state: a value returned", "an action for a ResettableEnv by random search. 
See base", "receding horizon control.\"\"\" from abc import ABC, abstractmethod from multiprocessing", ":param trajectories: the number of trajectories to evaluate.\"\"\" self.horizon =", "True # If the main process crashes, we should not", "<filename>src/aprl/agents/monte_carlo.py \"\"\"Monte Carlo receding horizon control.\"\"\" from abc import ABC,", "self.horizon = horizon self.trajectories = trajectories @abstractmethod def seed(self, seed):", "trajectories to evaluate. It will be rounded up to the", "seed): \"\"\"Sets a seed for the PRNG for the action", "forward() to derive other values.\"\"\" state = MujocoState.from_flattened(x, self.sim) state.set_mjdata(self.sim.data)", "us = [self.env.action_space.sample() for _ in range(self.horizon)] total_rew = 0", "env): \"\"\"Wraps a MujocoEnv, adding get_state and set_state methods. :param", "Generates self.trajectories action sequences, each of length self.horizon. The cumulative", "elif cmd == \"close\": remote.close() break else: raise NotImplementedError except", "details. Search takes place in a single environment, which is", "returns the first action and the cumulative reward of the", "sequences. :param seed (int): a seed.\"\"\" pass @abstractmethod def best_action(self,", "return best def _worker(remote, parent_remote, dynamic_fn_wrapper, horizon, trajectories): parent_remote.close() dynamics", "qpos and qvel, calling forward() to derive other values.\"\"\" state", "+= rew if done: break res.append((us[0], total_rew)) self.env.set_state(state) best =", "(work_remote, remote, dynamic_fn) in enumerate(worker_cfgs): args = (work_remote, remote, CloudpickleWrapper(dynamic_fn),", "workers and store configuration parameters. :param env_fns (list<()->ResettableEnv>): list of", "sequences. See base class for details. Search takes place in", "env.get_state(). :return (action, reward): the best action found and associated", "horizon, trajectories): parent_remote.close() dynamics = dynamic_fn_wrapper.var() dynamics.reset() mc = MonteCarloSingle(dynamics,", "crashes, we should not cause things to hang process.start() self.ps.append(process)", "action out of a random search of action sequences. Generates", "rew, done, info = env.step(a) yield a, ob, rew, done,", "def __init__(self, env, horizon, trajectories): \"\"\"See base class.\"\"\" super().__init__(horizon, trajectories)", "best action out of a random search of action sequences.\"\"\"", "in range(nremotes)] self.remotes, self.work_remotes = zip(*pipes) worker_cfgs = zip(self.work_remotes, self.remotes,", "of each action sequence is computed, starting from state. The", "class MonteCarlo(ABC): \"\"\"Selects an action for a ResettableEnv by random", "trajectories, seed=0): \"\"\"Launch subprocess workers and store configuration parameters. :param", "CloudpickleWrapper(dynamic_fn), horizon, traj_per_worker) process = Process(target=_worker, args=args) process.daemon = True", "trajectory in the environment, resetting the state to the original", "place in a single environment, which is reset to state", "abc import ABC, abstractmethod from multiprocessing import Pipe, Process import", "action for a ResettableEnv by random search. 
See base class", "MujocoState.from_mjdata(self.sim.data).flatten() def set_state(self, x): \"\"\"Restores qpos and qvel, calling forward()", "the qpos and qvel state of the MuJoCo emulator.\"\"\" return", "get_state(self): \"\"\"Serializes the qpos and qvel state of the MuJoCo", "max(res, key=lambda x: x[1]) return best def _worker(remote, parent_remote, dynamic_fn_wrapper,", "in enumerate(worker_cfgs): args = (work_remote, remote, CloudpickleWrapper(dynamic_fn), horizon, traj_per_worker) process", "remote in self.work_remotes: remote.close() def seed(self, seed): \"\"\"See base class.\"\"\"", "See base class for details. Search takes place in a", "remote in self.remotes: remote.send((\"close\", None)) for p in self.ps: p.join()", "environment, resetting the state to the original after each trajectory.\"\"\"", "self.sim.forward() # put mjData in consistent state def reset(self): \"\"\"See", "get_state and set_state methods. :param env: a MujocoEnv. NOTE: it", "consistent state def reset(self): \"\"\"See base class.\"\"\" return self.env.reset() def", "MujocoEnv into a ResettableEnv. Note all MuJoCo environments are resettable.\"\"\"", "for the PRNG for the action sequences. :param seed (int):", "list of thunks. :param horizon (int): length of trajectories to", "done, _info = self.env.step(u) total_rew += rew if done: break", "sequence.\"\"\" res = [] for _ in range(self.trajectories): self.env.set_state(state) us", "to search over. :param trajectories: the number of trajectories to", "(action, reward): the best action found and associated reward.\"\"\" pass", "== \"close\": remote.close() break else: raise NotImplementedError except KeyboardInterrupt: print(\"MonteCarloParallel", "MujocoState, ResettableEnv class MujocoResettableWrapper(ResettableEnv, gym.Wrapper): \"\"\"Converts a MujocoEnv into a", "(int): minimum number of trajectories to evaluate. It will be", ") gym.Wrapper.__init__(self, env) self.sim = env.unwrapped.sim def get_state(self): \"\"\"Serializes the", "ResettableEnv class MujocoResettableWrapper(ResettableEnv, gym.Wrapper): \"\"\"Converts a MujocoEnv into a ResettableEnv.", "environments are resettable.\"\"\" def __init__(self, env): \"\"\"Wraps a MujocoEnv, adding", "reward of each action sequence is computed, starting from state.", "class.\"\"\" super().__init__(horizon, trajectories) self.env = env def seed(self, seed): \"\"\"Sets", "The function returns the first action and the cumulative reward", "reset(self): \"\"\"See base class.\"\"\" return self.env.reset() def step(self, a): \"\"\"See", "evaluating each action sequence.\"\"\" res = [] for _ in", "of self.trajectories / nworkers traj_per_worker = (self.trajectories - 1) //", "self.trajectories = trajectories @abstractmethod def seed(self, seed): \"\"\"Sets a seed", "env(ResettableEnv): a resettable environment.\"\"\" while True: state = env.get_state() a,", "state = MujocoState.from_flattened(x, self.sim) state.set_mjdata(self.sim.data) self.sim.forward() # put mjData in", "actions. Evaluates each trajectory in the environment, resetting the state", "by Baselines SubprocVecEnv. 
def __init__(self, env_fns, horizon, trajectories, seed=0): \"\"\"Launch", "pass class MonteCarloSingle(MonteCarlo): \"\"\"Selects an action for a ResettableEnv by", "worker: got KeyboardInterrupt\") finally: dynamics.close() class MonteCarloParallel(MonteCarlo): \"\"\"Like MonteCarlo, but", "+ i)) def best_action(self, state): \"\"\"Returns the best action out", "self.remotes: remote.send((\"close\", None)) for p in self.ps: p.join() def receding_horizon(monte_carlo,", "len(env_fns) # Integer ceiling of self.trajectories / nworkers traj_per_worker =", ":param env: a MujocoEnv. NOTE: it must not be wrapped", "of actions. Evaluates each trajectory in the environment, resetting the", "in the environment, resetting the state to the original after", "details. This implementation is not parallelized.\"\"\" def __init__(self, env, horizon,", "of action sequences.\"\"\" for remote in self.remotes: remote.send((\"search\", state)) results", "and qvel, calling forward() to derive other values.\"\"\" state =", "== \"seed\": mc.seed(x) elif cmd == \"search\": best_u, best_r =", "MujocoEnv, adding get_state and set_state methods. :param env: a MujocoEnv.", "remote in self.remotes] best = max(results, key=lambda x: x[1]) return", "by random search. Randomly samples fixed-length sequences of actions. Evaluates", "search. See base class for details. This implementation is not", "seed(self, seed): \"\"\"See base class.\"\"\" for i, remote in enumerate(self.remotes):", "should not cause things to hang process.start() self.ps.append(process) for remote", "base class for details. Search takes place in a single", "\"\"\"Like MonteCarlo, but performs the random search in parallel.\"\"\" #", "total_rew = 0 for u in us: _ob, rew, done,", "a seed.\"\"\" self.env.action_space.np_random.seed(seed) def best_action(self, state): \"\"\"Returns the best action", "import gym from stable_baselines.common.vec_env import CloudpickleWrapper from aprl.common.mujoco import MujocoState,", "\"_max_episode_steps\"): raise TypeError( \"Environment must not have a time limit", "the PRNG for the action sequences. :param seed (int): a", "import Pipe, Process import gym from stable_baselines.common.vec_env import CloudpickleWrapper from", "[] for i, (work_remote, remote, dynamic_fn) in enumerate(worker_cfgs): args =", "mc = MonteCarloSingle(dynamics, horizon, trajectories) try: while True: cmd, x", "SubprocVecEnv. def __init__(self, env_fns, horizon, trajectories, seed=0): \"\"\"Launch subprocess workers", "trajectories): parent_remote.close() dynamics = dynamic_fn_wrapper.var() dynamics.reset() mc = MonteCarloSingle(dynamics, horizon,", "gym from stable_baselines.common.vec_env import CloudpickleWrapper from aprl.common.mujoco import MujocoState, ResettableEnv", "horizon, trajectories, seed=0): \"\"\"Launch subprocess workers and store configuration parameters.", "= (work_remote, remote, CloudpickleWrapper(dynamic_fn), horizon, traj_per_worker) process = Process(target=_worker, args=args)", "number of trajectories to evaluate.\"\"\" self.horizon = horizon self.trajectories =", "the action sequences. :param seed (int): a seed.\"\"\" self.env.action_space.np_random.seed(seed) def", ":param trajectories (int): minimum number of trajectories to evaluate. It", "seed (int): a seed.\"\"\" self.env.action_space.np_random.seed(seed) def best_action(self, state): \"\"\"Returns the", "action for a ResettableEnv by random search. 
Randomly samples fixed-length", "def __init__(self, env): \"\"\"Wraps a MujocoEnv, adding get_state and set_state", "a seed for the PRNG for the action sequences. :param", "single environment, which is reset to state before evaluating each", "class MujocoResettableWrapper(ResettableEnv, gym.Wrapper): \"\"\"Converts a MujocoEnv into a ResettableEnv. Note", "samples fixed-length sequences of actions. Evaluates each trajectory in the", "horizon: the length of the trajectories to search over. :param", "trajectories: the number of trajectories to evaluate.\"\"\" self.horizon = horizon", "parallelized.\"\"\" def __init__(self, env, horizon, trajectories): \"\"\"See base class.\"\"\" super().__init__(horizon,", "for _ in range(nremotes)] self.remotes, self.work_remotes = zip(*pipes) worker_cfgs =", "best def close(self): \"\"\"Shuts down parallel workers.\"\"\" for remote in", "out of a random search of action sequences.\"\"\" for remote", "Integer ceiling of self.trajectories / nworkers traj_per_worker = (self.trajectories -", "controller for env or a clone of env. :param env(ResettableEnv):", "trajectories to search over. :param trajectories (int): minimum number of", "\"\"\"Returns the best action out of a random search of", "will be rounded up to the nearest multiple of len(make_env).\"\"\"", "calling forward() to derive other values.\"\"\" state = MujocoState.from_flattened(x, self.sim)", "the best action found and associated reward.\"\"\" pass class MonteCarloSingle(MonteCarlo):", "methods. :param env: a MujocoEnv. NOTE: it must not be", "self.remotes, self.work_remotes = zip(*pipes) worker_cfgs = zip(self.work_remotes, self.remotes, env_fns) self.ps", "the original after each trajectory.\"\"\" @abstractmethod def __init__(self, horizon, trajectories):", "and associated reward.\"\"\" pass class MonteCarloSingle(MonteCarlo): \"\"\"Selects an action for", "seed.\"\"\" self.env.action_space.np_random.seed(seed) def best_action(self, state): \"\"\"Returns the best action out", ":param state: a value returned by env.get_state(). :return (action, reward):", "class for details. This implementation is not parallelized.\"\"\" def __init__(self,", "trajectories): \"\"\"Constructs a MonteCarlo instance for env. 
:param horizon: the", "self.env.action_space.np_random.seed(seed) def best_action(self, state): \"\"\"Returns the best action out of", "else: raise NotImplementedError except KeyboardInterrupt: print(\"MonteCarloParallel worker: got KeyboardInterrupt\") finally:", "self.remotes, env_fns) self.ps = [] for i, (work_remote, remote, dynamic_fn)", "\"\"\"See base class.\"\"\" for i, remote in enumerate(self.remotes): remote.send((\"seed\", seed", "env): \"\"\"Receding horizon control :param monte_carlo(MonteCarlo): a Monte Carlo controller", ":param env(ResettableEnv): a resettable environment.\"\"\" while True: state = env.get_state()", "passing in env.unwrapped instead).\" ) gym.Wrapper.__init__(self, env) self.sim = env.unwrapped.sim", "remote in enumerate(self.remotes): remote.send((\"seed\", seed + i)) def best_action(self, state):", "in self.remotes] best = max(results, key=lambda x: x[1]) return best", "the nearest multiple of len(make_env).\"\"\" super().__init__(horizon, trajectories) nremotes = len(env_fns)", "must not have a time limit \" \"(try passing in", "first action and the cumulative reward of the action sequences", "parent_remote.close() dynamics = dynamic_fn_wrapper.var() dynamics.reset() mc = MonteCarloSingle(dynamics, horizon, trajectories)", "things to hang process.start() self.ps.append(process) for remote in self.work_remotes: remote.close()", "hang process.start() self.ps.append(process) for remote in self.work_remotes: remote.close() def seed(self,", "/ nworkers traj_per_worker = (self.trajectories - 1) // nremotes +", "search. Randomly samples fixed-length sequences of actions. Evaluates each trajectory", "the cumulative reward of the action sequences with the largest", "Search takes place in a single environment, which is reset", "self.env.set_state(state) best = max(res, key=lambda x: x[1]) return best def", "fixed-length sequences of actions. Evaluates each trajectory in the environment,", "\"\"\"Sets a seed for the PRNG for the action sequences.", "a Monte Carlo controller for env or a clone of", "env.get_state() a, _seq_rew = monte_carlo.best_action(state) ob, rew, done, info =", "return self.env.reset() def step(self, a): \"\"\"See base class.\"\"\" return self.env.step(a)", "def step(self, a): \"\"\"See base class.\"\"\" return self.env.step(a) class MonteCarlo(ABC):", "monte_carlo(MonteCarlo): a Monte Carlo controller for env or a clone", "remote.send((\"close\", None)) for p in self.ps: p.join() def receding_horizon(monte_carlo, env):", "class MonteCarloSingle(MonteCarlo): \"\"\"Selects an action for a ResettableEnv by random", "env_fns (list<()->ResettableEnv>): list of thunks. :param horizon (int): length of", "horizon control.\"\"\" from abc import ABC, abstractmethod from multiprocessing import", "multiprocessing import Pipe, Process import gym from stable_baselines.common.vec_env import CloudpickleWrapper", "all MuJoCo environments are resettable.\"\"\" def __init__(self, env): \"\"\"Wraps a", "seed (int): a seed.\"\"\" pass @abstractmethod def best_action(self, state): \"\"\"Returns", "in range(self.horizon)] total_rew = 0 for u in us: _ob,", "a value returned by env.get_state(). :return (action, reward): the best", "a TimeLimit.\"\"\" if hasattr(env, \"_max_episode_steps\"): raise TypeError( \"Environment must not", "and store configuration parameters. :param env_fns (list<()->ResettableEnv>): list of thunks.", "action sequences, each of length self.horizon. 
The cumulative reward of", "a time limit \" \"(try passing in env.unwrapped instead).\" )", "if cmd == \"seed\": mc.seed(x) elif cmd == \"search\": best_u,", "for remote in self.remotes: remote.send((\"close\", None)) for p in self.ps:", "and qvel state of the MuJoCo emulator.\"\"\" return MujocoState.from_mjdata(self.sim.data).flatten() def", "base class.\"\"\" for i, remote in enumerate(self.remotes): remote.send((\"seed\", seed +", "of action sequences. See base class for details. Search takes", "original after each trajectory.\"\"\" @abstractmethod def __init__(self, horizon, trajectories): \"\"\"Constructs", "range(self.horizon)] total_rew = 0 for u in us: _ob, rew,", "rounded up to the nearest multiple of len(make_env).\"\"\" super().__init__(horizon, trajectories)", "instead).\" ) gym.Wrapper.__init__(self, env) self.sim = env.unwrapped.sim def get_state(self): \"\"\"Serializes", "state def reset(self): \"\"\"See base class.\"\"\" return self.env.reset() def step(self,", "cmd, x = remote.recv() if cmd == \"seed\": mc.seed(x) elif", "CloudpickleWrapper from aprl.common.mujoco import MujocoState, ResettableEnv class MujocoResettableWrapper(ResettableEnv, gym.Wrapper): \"\"\"Converts", "we should not cause things to hang process.start() self.ps.append(process) for", "parent_remote, dynamic_fn_wrapper, horizon, trajectories): parent_remote.close() dynamics = dynamic_fn_wrapper.var() dynamics.reset() mc", "dynamics.reset() mc = MonteCarloSingle(dynamics, horizon, trajectories) try: while True: cmd,", "ResettableEnv by random search. Randomly samples fixed-length sequences of actions.", "TypeError( \"Environment must not have a time limit \" \"(try", "up to the nearest multiple of len(make_env).\"\"\" super().__init__(horizon, trajectories) nremotes", "= dynamic_fn_wrapper.var() dynamics.reset() mc = MonteCarloSingle(dynamics, horizon, trajectories) try: while", "= env def seed(self, seed): \"\"\"Sets a seed for the" ]
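# --- Usage sketch (illustrative, not part of the original module). ---
# A minimal receding-horizon loop driving MonteCarloSingle; the env id
# "Reacher-v2" and the horizon/trajectory counts are assumptions, not
# values taken from this file.
if __name__ == "__main__":
    raw_env = gym.make("Reacher-v2").unwrapped  # unwrapped: drop the TimeLimit
    env = MujocoResettableWrapper(raw_env)
    env.reset()
    mc = MonteCarloSingle(env, horizon=10, trajectories=100)
    mc.seed(0)
    for i, (a, ob, rew, done, info) in enumerate(receding_horizon(mc, env)):
        print("step {}: reward={:.3f}".format(i, rew))
        if i >= 20:  # the generator otherwise runs until the episode ends
            break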
[ "p0Num += trainMatrix[i] p0Denom += sum(trainMatrix[i]) p1Vect = p1Num /", "[0, 1, 0, 1, 0, 1] # 1 is abusive,", "'stupid'], ['my', 'dalmation', 'is', 'so', 'cute', 'I', 'love', 'him'], ['stop',", "'him', 'to', 'dog', 'park', 'stupid'], ['my', 'dalmation', 'is', 'so', 'cute',", "'please'], #[0,0,1,1,1......] ['maybe', 'not', 'take', 'him', 'to', 'dog', 'park', 'stupid'],", "if __name__ == '__main__': postinList, classVec = loadDataSet() myVocabList =", "p0Denom return p0Vect, p1Vect, pAbusive if __name__ == '__main__': postinList,", "'help', 'please'], #[0,0,1,1,1......] ['maybe', 'not', 'take', 'him', 'to', 'dog', 'park',", "trainMat.append(setOfWords2Vec(myVocabList, postinDoc)) print(trainMat) p0V, p1V, pAb = trainNB0(trainMat, classVec) print(p0V,", "'love', 'him'], ['stop', 'posting', 'stupid', 'worthless', 'garbage'], ['mr', 'licks', 'ate',", "| set(document) return list(vocabSet) def setOfWords2Vec(vocabList, inputSet): returnVec = [0]", "def trainNB0(trainMatrix, trainCategory): numTrainDocs = len(trainMatrix) numWords = len(trainMatrix[0]) pAbusive", "__name__ == '__main__': postinList, classVec = loadDataSet() myVocabList = createVocabList(postinList)", "is abusive, 0 not return postingList, classVec def createVocabList(dataSet): vocabSet", "'stupid', 'worthless', 'garbage'], ['mr', 'licks', 'ate', 'my', 'steak', 'how', 'to',", "setOfWords2Vec(vocabList, inputSet): returnVec = [0] * len(vocabList) for word in", "= vocabSet | set(document) return list(vocabSet) def setOfWords2Vec(vocabList, inputSet): returnVec", "createVocabList(postinList) # print(setOfWords2Vec(myVocabList, postinList[0])) trainMat = [] for postinDoc in", "not in my vocabulary' % word) return returnVec def trainNB0(trainMatrix,", "trainNB0(trainMatrix, trainCategory): numTrainDocs = len(trainMatrix) numWords = len(trainMatrix[0]) pAbusive =", "'steak', 'how', 'to', 'stop', 'him'], ['quit', 'buying', 'worthless', 'dog', 'food',", "vocabSet = vocabSet | set(document) return list(vocabSet) def setOfWords2Vec(vocabList, inputSet):", "set(document) return list(vocabSet) def setOfWords2Vec(vocabList, inputSet): returnVec = [0] *", "vocabSet = set([]) for document in dataSet: vocabSet = vocabSet", "set([]) for document in dataSet: vocabSet = vocabSet | set(document)", "/ p0Denom return p0Vect, p1Vect, pAbusive if __name__ == '__main__':", "returnVec = [0] * len(vocabList) for word in inputSet: if", "for word in inputSet: if word in vocabList: returnVec[vocabList.index(word)] =", "postingList = [['my', 'dog', 'has', 'flea', 'problems', 'help', 'please'], #[0,0,1,1,1......]", "pAbusive = sum(trainCategory) / float(numTrainDocs) p0Num = np.zeros(numWords) p1Num =", "vocabSet | set(document) return list(vocabSet) def setOfWords2Vec(vocabList, inputSet): returnVec =", "'him'], ['quit', 'buying', 'worthless', 'dog', 'food', 'stupid']] classVec = [0,", "my vocabulary' % word) return returnVec def trainNB0(trainMatrix, trainCategory): numTrainDocs", "['quit', 'buying', 'worthless', 'dog', 'food', 'stupid']] classVec = [0, 1,", "0 not return postingList, classVec def createVocabList(dataSet): vocabSet = set([])", "+= sum(trainMatrix[i]) else: p0Num += trainMatrix[i] p0Denom += sum(trainMatrix[i]) p1Vect", "# 1 is abusive, 0 not return postingList, classVec def", "1: p1Num += trainMatrix[i] p1Denom += sum(trainMatrix[i]) else: p0Num +=", "# print(setOfWords2Vec(myVocabList, postinList[0])) trainMat = [] for postinDoc in postinList:", "'him'], ['stop', 'posting', 'stupid', 'worthless', 'garbage'], ['mr', 'licks', 'ate', 
'my',", "'dog', 'food', 'stupid']] classVec = [0, 1, 0, 1, 0,", "'park', 'stupid'], ['my', 'dalmation', 'is', 'so', 'cute', 'I', 'love', 'him'],", "if word in vocabList: returnVec[vocabList.index(word)] = 1 else: print('the word:", "'how', 'to', 'stop', 'him'], ['quit', 'buying', 'worthless', 'dog', 'food', 'stupid']]", "float(numTrainDocs) p0Num = np.zeros(numWords) p1Num = np.zeros(numWords) p0Denom = 0.0", "p1Vect, pAbusive if __name__ == '__main__': postinList, classVec = loadDataSet()", "'garbage'], ['mr', 'licks', 'ate', 'my', 'steak', 'how', 'to', 'stop', 'him'],", "vocabulary' % word) return returnVec def trainNB0(trainMatrix, trainCategory): numTrainDocs =", "return p0Vect, p1Vect, pAbusive if __name__ == '__main__': postinList, classVec", "return returnVec def trainNB0(trainMatrix, trainCategory): numTrainDocs = len(trainMatrix) numWords =", "vocabList: returnVec[vocabList.index(word)] = 1 else: print('the word: %s is not", "1, 0, 1] # 1 is abusive, 0 not return", "trainMatrix[i] p0Denom += sum(trainMatrix[i]) p1Vect = p1Num / p1Denom p0Vect", "def setOfWords2Vec(vocabList, inputSet): returnVec = [0] * len(vocabList) for word", "= 0.0 p1Denom = 0.0 for i in range(numTrainDocs): if", "<filename>machineLearnInAction/bayes.py import numpy as np def loadDataSet(): postingList = [['my',", "as np def loadDataSet(): postingList = [['my', 'dog', 'has', 'flea',", "= [0] * len(vocabList) for word in inputSet: if word", "dataSet: vocabSet = vocabSet | set(document) return list(vocabSet) def setOfWords2Vec(vocabList,", "= np.zeros(numWords) p0Denom = 0.0 p1Denom = 0.0 for i", "pAbusive if __name__ == '__main__': postinList, classVec = loadDataSet() myVocabList", "'worthless', 'garbage'], ['mr', 'licks', 'ate', 'my', 'steak', 'how', 'to', 'stop',", "for document in dataSet: vocabSet = vocabSet | set(document) return", "postingList, classVec def createVocabList(dataSet): vocabSet = set([]) for document in", "= [['my', 'dog', 'has', 'flea', 'problems', 'help', 'please'], #[0,0,1,1,1......] ['maybe',", "'buying', 'worthless', 'dog', 'food', 'stupid']] classVec = [0, 1, 0,", "trainCategory[i] == 1: p1Num += trainMatrix[i] p1Denom += sum(trainMatrix[i]) else:", "[['my', 'dog', 'has', 'flea', 'problems', 'help', 'please'], #[0,0,1,1,1......] ['maybe', 'not',", "abusive, 0 not return postingList, classVec def createVocabList(dataSet): vocabSet =", "%s is not in my vocabulary' % word) return returnVec", "postinList[0])) trainMat = [] for postinDoc in postinList: trainMat.append(setOfWords2Vec(myVocabList, postinDoc))", "print('the word: %s is not in my vocabulary' % word)", "numpy as np def loadDataSet(): postingList = [['my', 'dog', 'has',", "createVocabList(dataSet): vocabSet = set([]) for document in dataSet: vocabSet =", "in range(numTrainDocs): if trainCategory[i] == 1: p1Num += trainMatrix[i] p1Denom", "'licks', 'ate', 'my', 'steak', 'how', 'to', 'stop', 'him'], ['quit', 'buying',", "loadDataSet(): postingList = [['my', 'dog', 'has', 'flea', 'problems', 'help', 'please'],", "is not in my vocabulary' % word) return returnVec def", "print(setOfWords2Vec(myVocabList, postinList[0])) trainMat = [] for postinDoc in postinList: trainMat.append(setOfWords2Vec(myVocabList,", "postinDoc)) print(trainMat) p0V, p1V, pAb = trainNB0(trainMat, classVec) print(p0V, p1V,", "p0Denom = 0.0 p1Denom = 0.0 for i in range(numTrainDocs):", "['mr', 'licks', 'ate', 'my', 'steak', 'how', 'to', 'stop', 'him'], ['quit',", "= [0, 1, 0, 1, 0, 1] # 1 is", "'has', 'flea', 'problems', 'help', 'please'], #[0,0,1,1,1......] 
['maybe', 'not', 'take', 'him',", "in my vocabulary' % word) return returnVec def trainNB0(trainMatrix, trainCategory):", "= p1Num / p1Denom p0Vect = p0Num / p0Denom return", "'dog', 'park', 'stupid'], ['my', 'dalmation', 'is', 'so', 'cute', 'I', 'love',", "'to', 'stop', 'him'], ['quit', 'buying', 'worthless', 'dog', 'food', 'stupid']] classVec", "myVocabList = createVocabList(postinList) # print(setOfWords2Vec(myVocabList, postinList[0])) trainMat = [] for", "'food', 'stupid']] classVec = [0, 1, 0, 1, 0, 1]", "not return postingList, classVec def createVocabList(dataSet): vocabSet = set([]) for", "1 is abusive, 0 not return postingList, classVec def createVocabList(dataSet):", "trainCategory): numTrainDocs = len(trainMatrix) numWords = len(trainMatrix[0]) pAbusive = sum(trainCategory)", "sum(trainMatrix[i]) else: p0Num += trainMatrix[i] p0Denom += sum(trainMatrix[i]) p1Vect =", "+= trainMatrix[i] p0Denom += sum(trainMatrix[i]) p1Vect = p1Num / p1Denom", "p1Vect = p1Num / p1Denom p0Vect = p0Num / p0Denom", "in postinList: trainMat.append(setOfWords2Vec(myVocabList, postinDoc)) print(trainMat) p0V, p1V, pAb = trainNB0(trainMat,", "classVec = loadDataSet() myVocabList = createVocabList(postinList) # print(setOfWords2Vec(myVocabList, postinList[0])) trainMat", "p1Num = np.zeros(numWords) p0Denom = 0.0 p1Denom = 0.0 for", "= sum(trainCategory) / float(numTrainDocs) p0Num = np.zeros(numWords) p1Num = np.zeros(numWords)", "sum(trainCategory) / float(numTrainDocs) p0Num = np.zeros(numWords) p1Num = np.zeros(numWords) p0Denom", "+= sum(trainMatrix[i]) p1Vect = p1Num / p1Denom p0Vect = p0Num", "document in dataSet: vocabSet = vocabSet | set(document) return list(vocabSet)", "% word) return returnVec def trainNB0(trainMatrix, trainCategory): numTrainDocs = len(trainMatrix)", "returnVec[vocabList.index(word)] = 1 else: print('the word: %s is not in", "= 1 else: print('the word: %s is not in my", "0.0 for i in range(numTrainDocs): if trainCategory[i] == 1: p1Num", "0, 1] # 1 is abusive, 0 not return postingList,", "'is', 'so', 'cute', 'I', 'love', 'him'], ['stop', 'posting', 'stupid', 'worthless',", "== '__main__': postinList, classVec = loadDataSet() myVocabList = createVocabList(postinList) #", "np def loadDataSet(): postingList = [['my', 'dog', 'has', 'flea', 'problems',", "postinDoc in postinList: trainMat.append(setOfWords2Vec(myVocabList, postinDoc)) print(trainMat) p0V, p1V, pAb =", "'problems', 'help', 'please'], #[0,0,1,1,1......] ['maybe', 'not', 'take', 'him', 'to', 'dog',", "#[0,0,1,1,1......] 
['maybe', 'not', 'take', 'him', 'to', 'dog', 'park', 'stupid'], ['my',", "'__main__': postinList, classVec = loadDataSet() myVocabList = createVocabList(postinList) # print(setOfWords2Vec(myVocabList,", "classVec def createVocabList(dataSet): vocabSet = set([]) for document in dataSet:", "sum(trainMatrix[i]) p1Vect = p1Num / p1Denom p0Vect = p0Num /", "p1Num += trainMatrix[i] p1Denom += sum(trainMatrix[i]) else: p0Num += trainMatrix[i]", "'posting', 'stupid', 'worthless', 'garbage'], ['mr', 'licks', 'ate', 'my', 'steak', 'how',", "'worthless', 'dog', 'food', 'stupid']] classVec = [0, 1, 0, 1,", "0, 1, 0, 1] # 1 is abusive, 0 not", "p0Denom += sum(trainMatrix[i]) p1Vect = p1Num / p1Denom p0Vect =", "def loadDataSet(): postingList = [['my', 'dog', 'has', 'flea', 'problems', 'help',", "['stop', 'posting', 'stupid', 'worthless', 'garbage'], ['mr', 'licks', 'ate', 'my', 'steak',", "p1Denom = 0.0 for i in range(numTrainDocs): if trainCategory[i] ==", "1] # 1 is abusive, 0 not return postingList, classVec", "'stop', 'him'], ['quit', 'buying', 'worthless', 'dog', 'food', 'stupid']] classVec =", "'so', 'cute', 'I', 'love', 'him'], ['stop', 'posting', 'stupid', 'worthless', 'garbage'],", "returnVec def trainNB0(trainMatrix, trainCategory): numTrainDocs = len(trainMatrix) numWords = len(trainMatrix[0])", "= [] for postinDoc in postinList: trainMat.append(setOfWords2Vec(myVocabList, postinDoc)) print(trainMat) p0V,", "p0Vect = p0Num / p0Denom return p0Vect, p1Vect, pAbusive if", "['my', 'dalmation', 'is', 'so', 'cute', 'I', 'love', 'him'], ['stop', 'posting',", "word in vocabList: returnVec[vocabList.index(word)] = 1 else: print('the word: %s", "word) return returnVec def trainNB0(trainMatrix, trainCategory): numTrainDocs = len(trainMatrix) numWords", "'flea', 'problems', 'help', 'please'], #[0,0,1,1,1......] 
['maybe', 'not', 'take', 'him', 'to',", "for postinDoc in postinList: trainMat.append(setOfWords2Vec(myVocabList, postinDoc)) print(trainMat) p0V, p1V, pAb", "np.zeros(numWords) p0Denom = 0.0 p1Denom = 0.0 for i in", "1 else: print('the word: %s is not in my vocabulary'", "trainMat = [] for postinDoc in postinList: trainMat.append(setOfWords2Vec(myVocabList, postinDoc)) print(trainMat)", "p0Num / p0Denom return p0Vect, p1Vect, pAbusive if __name__ ==", "word in inputSet: if word in vocabList: returnVec[vocabList.index(word)] = 1", "'take', 'him', 'to', 'dog', 'park', 'stupid'], ['my', 'dalmation', 'is', 'so',", "if trainCategory[i] == 1: p1Num += trainMatrix[i] p1Denom += sum(trainMatrix[i])", "list(vocabSet) def setOfWords2Vec(vocabList, inputSet): returnVec = [0] * len(vocabList) for", "= 0.0 for i in range(numTrainDocs): if trainCategory[i] == 1:", "p1Denom p0Vect = p0Num / p0Denom return p0Vect, p1Vect, pAbusive", "classVec = [0, 1, 0, 1, 0, 1] # 1", "numTrainDocs = len(trainMatrix) numWords = len(trainMatrix[0]) pAbusive = sum(trainCategory) /", "postinList: trainMat.append(setOfWords2Vec(myVocabList, postinDoc)) print(trainMat) p0V, p1V, pAb = trainNB0(trainMat, classVec)", "'to', 'dog', 'park', 'stupid'], ['my', 'dalmation', 'is', 'so', 'cute', 'I',", "p0Vect, p1Vect, pAbusive if __name__ == '__main__': postinList, classVec =", "'dalmation', 'is', 'so', 'cute', 'I', 'love', 'him'], ['stop', 'posting', 'stupid',", "'ate', 'my', 'steak', 'how', 'to', 'stop', 'him'], ['quit', 'buying', 'worthless',", "p1Num / p1Denom p0Vect = p0Num / p0Denom return p0Vect,", "= len(trainMatrix[0]) pAbusive = sum(trainCategory) / float(numTrainDocs) p0Num = np.zeros(numWords)", "'my', 'steak', 'how', 'to', 'stop', 'him'], ['quit', 'buying', 'worthless', 'dog',", "in vocabList: returnVec[vocabList.index(word)] = 1 else: print('the word: %s is", "[] for postinDoc in postinList: trainMat.append(setOfWords2Vec(myVocabList, postinDoc)) print(trainMat) p0V, p1V,", "= loadDataSet() myVocabList = createVocabList(postinList) # print(setOfWords2Vec(myVocabList, postinList[0])) trainMat =", "import numpy as np def loadDataSet(): postingList = [['my', 'dog',", "len(vocabList) for word in inputSet: if word in vocabList: returnVec[vocabList.index(word)]", "'dog', 'has', 'flea', 'problems', 'help', 'please'], #[0,0,1,1,1......] 
['maybe', 'not', 'take',", "== 1: p1Num += trainMatrix[i] p1Denom += sum(trainMatrix[i]) else: p0Num", "in inputSet: if word in vocabList: returnVec[vocabList.index(word)] = 1 else:", "p1Denom += sum(trainMatrix[i]) else: p0Num += trainMatrix[i] p0Denom += sum(trainMatrix[i])", "range(numTrainDocs): if trainCategory[i] == 1: p1Num += trainMatrix[i] p1Denom +=", "inputSet): returnVec = [0] * len(vocabList) for word in inputSet:", "/ p1Denom p0Vect = p0Num / p0Denom return p0Vect, p1Vect,", "else: print('the word: %s is not in my vocabulary' %", "= np.zeros(numWords) p1Num = np.zeros(numWords) p0Denom = 0.0 p1Denom =", "for i in range(numTrainDocs): if trainCategory[i] == 1: p1Num +=", "= p0Num / p0Denom return p0Vect, p1Vect, pAbusive if __name__", "inputSet: if word in vocabList: returnVec[vocabList.index(word)] = 1 else: print('the", "= set([]) for document in dataSet: vocabSet = vocabSet |", "+= trainMatrix[i] p1Denom += sum(trainMatrix[i]) else: p0Num += trainMatrix[i] p0Denom", "'not', 'take', 'him', 'to', 'dog', 'park', 'stupid'], ['my', 'dalmation', 'is',", "'cute', 'I', 'love', 'him'], ['stop', 'posting', 'stupid', 'worthless', 'garbage'], ['mr',", "= createVocabList(postinList) # print(setOfWords2Vec(myVocabList, postinList[0])) trainMat = [] for postinDoc", "p0Num = np.zeros(numWords) p1Num = np.zeros(numWords) p0Denom = 0.0 p1Denom", "postinList, classVec = loadDataSet() myVocabList = createVocabList(postinList) # print(setOfWords2Vec(myVocabList, postinList[0]))", "1, 0, 1, 0, 1] # 1 is abusive, 0", "return postingList, classVec def createVocabList(dataSet): vocabSet = set([]) for document", "= len(trainMatrix) numWords = len(trainMatrix[0]) pAbusive = sum(trainCategory) / float(numTrainDocs)", "in dataSet: vocabSet = vocabSet | set(document) return list(vocabSet) def", "['maybe', 'not', 'take', 'him', 'to', 'dog', 'park', 'stupid'], ['my', 'dalmation',", "trainMatrix[i] p1Denom += sum(trainMatrix[i]) else: p0Num += trainMatrix[i] p0Denom +=", "'I', 'love', 'him'], ['stop', 'posting', 'stupid', 'worthless', 'garbage'], ['mr', 'licks',", "0.0 p1Denom = 0.0 for i in range(numTrainDocs): if trainCategory[i]", "def createVocabList(dataSet): vocabSet = set([]) for document in dataSet: vocabSet", "/ float(numTrainDocs) p0Num = np.zeros(numWords) p1Num = np.zeros(numWords) p0Denom =", "else: p0Num += trainMatrix[i] p0Denom += sum(trainMatrix[i]) p1Vect = p1Num", "np.zeros(numWords) p1Num = np.zeros(numWords) p0Denom = 0.0 p1Denom = 0.0", "numWords = len(trainMatrix[0]) pAbusive = sum(trainCategory) / float(numTrainDocs) p0Num =", "* len(vocabList) for word in inputSet: if word in vocabList:", "print(trainMat) p0V, p1V, pAb = trainNB0(trainMat, classVec) print(p0V, p1V, pAb)", "loadDataSet() myVocabList = createVocabList(postinList) # print(setOfWords2Vec(myVocabList, postinList[0])) trainMat = []", "'stupid']] classVec = [0, 1, 0, 1, 0, 1] #", "return list(vocabSet) def setOfWords2Vec(vocabList, inputSet): returnVec = [0] * len(vocabList)", "len(trainMatrix) numWords = len(trainMatrix[0]) pAbusive = sum(trainCategory) / float(numTrainDocs) p0Num", "[0] * len(vocabList) for word in inputSet: if word in", "len(trainMatrix[0]) pAbusive = sum(trainCategory) / float(numTrainDocs) p0Num = np.zeros(numWords) p1Num", "word: %s is not in my vocabulary' % word) return", "i in range(numTrainDocs): if trainCategory[i] == 1: p1Num += trainMatrix[i]" ]
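# --- Companion sketch (illustrative; classifyNB is an assumed helper name,
# not defined elsewhere in this file). It scores a set-of-words vector
# against the outputs of trainNB0. Note trainNB0 does no smoothing, so any
# word unseen in a class zeroes that class's product; a production version
# would smooth counts and compare log-probabilities instead.
def classifyNB(vec2Classify, p0Vec, p1Vec, pClass1):
    vec = np.array(vec2Classify)
    # Multiply only the conditional probabilities of words present in the doc.
    p1 = np.prod(np.where(vec == 1, p1Vec, 1.0)) * pClass1
    p0 = np.prod(np.where(vec == 1, p0Vec, 1.0)) * (1.0 - pClass1)
    return 1 if p1 > p0 else 0

# Example, reusing the variables from the __main__ block above:
#   thisDoc = setOfWords2Vec(myVocabList, ['love', 'my', 'dalmation'])
#   print(classifyNB(thisDoc, p0V, p1V, pAb))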
[ "Debugger, HaltError, NotHaltedError try: from .dwarf import ELFDebugger except ImportError:", ".ahb import AHB from .debugger import Debugger, HaltError, NotHaltedError try:", "HaltError, NotHaltedError try: from .dwarf import ELFDebugger except ImportError: pass", "AHB from .debugger import Debugger, HaltError, NotHaltedError try: from .dwarf", ".debugger import Debugger, HaltError, NotHaltedError try: from .dwarf import ELFDebugger", ".swd import SWD from .ahb import AHB from .debugger import", "from .swd import SWD from .ahb import AHB from .debugger", "import SWD from .ahb import AHB from .debugger import Debugger,", "SWD from .ahb import AHB from .debugger import Debugger, HaltError,", "from .ahb import AHB from .debugger import Debugger, HaltError, NotHaltedError", "import AHB from .debugger import Debugger, HaltError, NotHaltedError try: from", "<reponame>segrids/arduino_due<filename>py/debug/__init__.py from .swd import SWD from .ahb import AHB from", "from .debugger import Debugger, HaltError, NotHaltedError try: from .dwarf import", "import Debugger, HaltError, NotHaltedError try: from .dwarf import ELFDebugger except" ]
[ "\"\"\"Given 2 endpoints, return the (new or cached) LineSegment3D inst.\"\"\"", "(p2, p1) if key not in self.seghash: return None return", "for a in range(3)) self.p2 = (self.p2[a] + offset[a] for", "+ sfx def __repr__(self): \"\"\"Standard string representation.\"\"\" return \"<LineSegment3D: {0}>\".format(self)", "endpoint's vertices\"\"\" self.p1 = (self.p1[a] * scale[a] for a in", "\"\"\"Returns a human readable coordinate string.\"\"\" return \"{0:a}\".format(self) def translate(self,offset):", "a human readable coordinate string.\"\"\" return \"{0:a}\".format(self) def translate(self,offset): \"\"\"Translate", "the line.\"\"\" return self.p1.distFromPoint(self.p2) class LineSegment3DCache(object): \"\"\"Cache class for 3D", "(self.p1[a] + offset[a] for a in range(3)) self.p2 = (self.p2[a]", "LineSegment3D inst.\"\"\" key = (p1, p2) if p1 < p2", "endpoint\"\"\" if p not in self.endhash: self.endhash[p] = [] self.endhash[p].append(seg)", "hashes for changed edge vertices\"\"\" oldseghash = self.seghash self.seghash =", "p): \"\"\"Compare points for sort ordering in an arbitrary heirarchy.\"\"\"", "for v in oldseghash.values() } oldendhash = self.endhash self.endhash =", "= \"\" sep = \" \" sfx = \"\" p1", "if key not in self.seghash: return None return self.seghash[key] def", "self.seghash[key] def add(self, p1, p2): \"\"\"Given 2 endpoints, return the", "not in self.seghash: return None return self.seghash[key] def add(self, p1,", "for v in oldendhash.values() for k in v } def", "two endpoints.\"\"\" return 2 def __iter__(self): \"\"\"Iterator generator for endpoints.\"\"\"", "self.p2)) def __lt__(self, p): return self < p def __cmp__(self,", "(p2, p1) if key in self.seghash: seg = self.seghash[key] seg.count", "self.seghash: return None return self.seghash[key] def add(self, p1, p2): \"\"\"Given", "\"\" p1 = self.p1.__format__(fmt) p2 = self.p2.__format__(fmt) return pfx +", "p1 < p2 else (p2, p1) if key in self.seghash:", "for a in range(3)) self.p2 = (self.p2[a] * scale[a] for", "string representation.\"\"\" return \"<LineSegment3D: {0}>\".format(self) def __str__(self): \"\"\"Returns a human", "self.p1 = p1 self.p2 = p2 self.count = 1 def", "= \" \" sfx = \"\" p1 = self.p1.__format__(fmt) p2", "a vertex number, returns a vertex coordinate vector.\"\"\" if idx", "raise LookupError() def __hash__(self): \"\"\"Returns hash value for endpoints\"\"\" return", "{ (v[0], v[1]): v for v in oldseghash.values() } oldendhash", "hash((self.p1, self.p2)) def __lt__(self, p): return self < p def", "vertices of all edges.\"\"\" for v in self.seghash.values(): v.translate(offset) self.rehash()", "or cached) LineSegment3D inst.\"\"\" key = (p1, p2) if p1", "\"\"\"Given 2 endpoints, return the cached LineSegment3D inst, if any.\"\"\"", "def __hash__(self): \"\"\"Returns hash value for endpoints\"\"\" return hash((self.p1, self.p2))", "self.p2.__format__(fmt) return pfx + p1 + sep + p2 +", "= (p1, p2) if p1 < p2 else (p2, p1)", "for v in self.seghash.values(): v.translate(offset) self.rehash() def scale(self,scale): \"\"\"Scale vertices", "= self.seghash self.seghash = { (v[0], v[1]): v for v", "for a in range(3)) def length(self): \"\"\"Returns the length of", "scale(self,scale): \"\"\"Scale vertices of all edges.\"\"\" for v in self.seghash.values():", "self.rehash() def endpoint_segments(self, p): \"\"\"get list of edges that end", "pfx = \"[\" sep = \", \" sfx = \"]\"", "edges.\"\"\" for v in self.seghash.values(): v.scale(scale) self.rehash() def endpoint_segments(self, p):", "support.\"\"\" 
pfx = \"\" sep = \" - \" sfx", "p1 self.p2 = p2 self.count = 1 def __len__(self): \"\"\"Line", "line segments in the cache.\"\"\" for pt in self.seghash.values(): yield", "self.p1 = (self.p1[a] * scale[a] for a in range(3)) self.p2", "val != 0: return val return self[1].__cmp__(p[1]) def __format__(self, fmt):", "inst, if any.\"\"\" key = (p1, p2) if p1 <", "\"\"\"get list of edges that end at point p\"\"\" if", "3D Line Segments.\"\"\" def __init__(self): \"\"\"Initialize as an empty cache.\"\"\"", "in an arbitrary heirarchy.\"\"\" val = self[0].__cmp__(p[0]) if val !=", "returns a vertex coordinate vector.\"\"\" if idx == 0: return", "def __str__(self): \"\"\"Returns a human readable coordinate string.\"\"\" return \"{0:a}\".format(self)", "\" - \" sfx = \"\" if \"a\" in fmt:", "def endpoint_segments(self, p): \"\"\"get list of edges that end at", "all edges.\"\"\" for v in self.seghash.values(): v.scale(scale) self.rehash() def endpoint_segments(self,", "self.seghash: seg = self.seghash[key] seg.count += 1 return seg seg", "ordering in an arbitrary heirarchy.\"\"\" val = self[0].__cmp__(p[0]) if val", "self.p1.__format__(fmt) p2 = self.p2.__format__(fmt) return pfx + p1 + sep", "line.\"\"\" return self.p1.distFromPoint(self.p2) class LineSegment3DCache(object): \"\"\"Cache class for 3D Line", "this segment has a given endpoint\"\"\" if p not in", "in range(3)) self.p2 = (self.p2[a] + offset[a] for a in", "return \"<LineSegment3D: {0}>\".format(self) def __str__(self): \"\"\"Returns a human readable coordinate", "as an empty cache.\"\"\" self.endhash = {} self.seghash = {}", "\"\"\"Reset the hashes for changed edge vertices\"\"\" oldseghash = self.seghash", "LineSegment3D(p1, p2) self.seghash[key] = seg self._add_endpoint(p1, seg) self._add_endpoint(p2, seg) return", "in oldendhash.values() for k in v } def translate(self,offset): \"\"\"Translate", "in fmt: pfx = \"\" sep = \" \" sfx", "class for 3D Line Segments.\"\"\" def __init__(self): \"\"\"Initialize as an", "return self.p1 if idx == 1: return self.p2 raise LookupError()", "\"<LineSegment3D: {0}>\".format(self) def __str__(self): \"\"\"Returns a human readable coordinate string.\"\"\"", "all edges.\"\"\" for v in self.seghash.values(): v.translate(offset) self.rehash() def scale(self,scale):", "pt in self.seghash.values(): yield pt def __len__(self): \"\"\"Length of sequence.\"\"\"", "if p1 > p2: p1, p2 = (p2, p1) self.p1", "self.endhash = { k: v for v in oldendhash.values() for", "\"]\" elif \"s\" in fmt: pfx = \"\" sep =", "if p not in self.endhash: return [] return self.endhash[p] def", "= \", \" sfx = \"]\" elif \"s\" in fmt:", "= (self.p2[a] + offset[a] for a in range(3)) def scale(self,scale):", "* scale[a] for a in range(3)) self.p2 = (self.p2[a] *", "+ sep + p2 + sfx def __repr__(self): \"\"\"Standard string", "{} def _add_endpoint(self, p, seg): \"\"\"Remember that this segment has", "point p\"\"\" if p not in self.endhash: return [] return", "+ offset[a] for a in range(3)) def scale(self,scale): \"\"\"Translate the", "2 endpoints, return the cached LineSegment3D inst, if any.\"\"\" key", "3D line segment.\"\"\" def __init__(self, p1, p2): \"\"\"Initialize with two", "has two endpoints.\"\"\" return 2 def __iter__(self): \"\"\"Iterator generator for", "< p2 else (p2, p1) if key not in self.seghash:", "= p2 self.count = 1 def __len__(self): \"\"\"Line segment always", "* scale[a] for a in range(3)) def length(self): \"\"\"Returns the", "class to represent a 3D line segment.\"\"\" def __init__(self, p1,", 
"self[1].__cmp__(p[1]) def __format__(self, fmt): \"\"\"Provides .format() support.\"\"\" pfx = \"\"", "def __repr__(self): \"\"\"Standard string representation.\"\"\" return \"<LineSegment3D: {0}>\".format(self) def __str__(self):", "if val != 0: return val return self[1].__cmp__(p[1]) def __format__(self,", "return self[1].__cmp__(p[1]) def __format__(self, fmt): \"\"\"Provides .format() support.\"\"\" pfx =", "seg self._add_endpoint(p1, seg) self._add_endpoint(p2, seg) return seg def __iter__(self): \"\"\"Creates", "of all edges.\"\"\" for v in self.seghash.values(): v.scale(scale) self.rehash() def", "else (p2, p1) if key in self.seghash: seg = self.seghash[key]", "\"\" sep = \" - \" sfx = \"\" if", "def scale(self,scale): \"\"\"Translate the endpoint's vertices\"\"\" self.p1 = (self.p1[a] *", "\"\"\"Initialize with two endpoints.\"\"\" if p1 > p2: p1, p2", "empty cache.\"\"\" self.endhash = {} self.seghash = {} def _add_endpoint(self,", "def __init__(self, p1, p2): \"\"\"Initialize with two endpoints.\"\"\" if p1", "\"\"\"Scale vertices of all edges.\"\"\" for v in self.seghash.values(): v.scale(scale)", "self.p1.distFromPoint(self.p2) class LineSegment3DCache(object): \"\"\"Cache class for 3D Line Segments.\"\"\" def", "__iter__(self): \"\"\"Iterator generator for endpoints.\"\"\" yield self.p1 yield self.p2 def", "a in range(3)) def scale(self,scale): \"\"\"Translate the endpoint's vertices\"\"\" self.p1", "oldseghash.values() } oldendhash = self.endhash self.endhash = { k: v", "def __init__(self): \"\"\"Initialize as an empty cache.\"\"\" self.endhash = {}", "pt def __len__(self): \"\"\"Length of sequence.\"\"\" return len(self.seghash) # vim:", "} def translate(self,offset): \"\"\"Translate vertices of all edges.\"\"\" for v", "2 def __iter__(self): \"\"\"Iterator generator for endpoints.\"\"\" yield self.p1 yield", "\" sfx = \"\" if \"a\" in fmt: pfx =", "self.endhash = {} self.seghash = {} def _add_endpoint(self, p, seg):", "get(self, p1, p2): \"\"\"Given 2 endpoints, return the cached LineSegment3D", "self.seghash[key] = seg self._add_endpoint(p1, seg) self._add_endpoint(p2, seg) return seg def", "\"\"\"Compare points for sort ordering in an arbitrary heirarchy.\"\"\" val", "p not in self.endhash: return [] return self.endhash[p] def get(self,", "endpoints, return the (new or cached) LineSegment3D inst.\"\"\" key =", "idx == 0: return self.p1 if idx == 1: return", "self.seghash self.seghash = { (v[0], v[1]): v for v in", "seg = LineSegment3D(p1, p2) self.seghash[key] = seg self._add_endpoint(p1, seg) self._add_endpoint(p2,", "= { (v[0], v[1]): v for v in oldseghash.values() }", "= (self.p1[a] * scale[a] for a in range(3)) self.p2 =", "represent a 3D line segment.\"\"\" def __init__(self, p1, p2): \"\"\"Initialize", "\"{0:a}\".format(self) def translate(self,offset): \"\"\"Translate the endpoint's vertices\"\"\" self.p1 = (self.p1[a]", "return self.endhash[p] def get(self, p1, p2): \"\"\"Given 2 endpoints, return", "self.seghash = { (v[0], v[1]): v for v in oldseghash.values()", "self.p1 = (self.p1[a] + offset[a] for a in range(3)) self.p2", "any.\"\"\" key = (p1, p2) if p1 < p2 else", "self.seghash.values(): v.translate(offset) self.rehash() def scale(self,scale): \"\"\"Scale vertices of all edges.\"\"\"", "seg.count += 1 return seg seg = LineSegment3D(p1, p2) self.seghash[key]", "yield pt def __len__(self): \"\"\"Length of sequence.\"\"\" return len(self.seghash) #", "the endpoint's vertices\"\"\" self.p1 = (self.p1[a] * scale[a] for a", "offset[a] for a in range(3)) def 
scale(self,scale): \"\"\"Translate the endpoint's", "val = self[0].__cmp__(p[0]) if val != 0: return val return", "{0}>\".format(self) def __str__(self): \"\"\"Returns a human readable coordinate string.\"\"\" return", "= \"\" sep = \" - \" sfx = \"\"", "the cached LineSegment3D inst, if any.\"\"\" key = (p1, p2)", "sequence.\"\"\" return len(self.seghash) # vim: expandtab tabstop=4 shiftwidth=4 softtabstop=4 nowrap", "for k in v } def translate(self,offset): \"\"\"Translate vertices of", "in the cache.\"\"\" for pt in self.seghash.values(): yield pt def", "vertices\"\"\" self.p1 = (self.p1[a] + offset[a] for a in range(3))", "def _add_endpoint(self, p, seg): \"\"\"Remember that this segment has a", "sep = \", \" sfx = \"]\" elif \"s\" in", "fmt: pfx = \"\" sep = \" \" sfx =", "for sort ordering in an arbitrary heirarchy.\"\"\" val = self[0].__cmp__(p[0])", "in range(3)) self.p2 = (self.p2[a] * scale[a] for a in", "\"s\" in fmt: pfx = \"\" sep = \" \"", "self.seghash[key] seg.count += 1 return seg seg = LineSegment3D(p1, p2)", "def __getitem__(self, idx): \"\"\"Given a vertex number, returns a vertex", "a 3D line segment.\"\"\" def __init__(self, p1, p2): \"\"\"Initialize with", "= p1 self.p2 = p2 self.count = 1 def __len__(self):", "__lt__(self, p): return self < p def __cmp__(self, p): \"\"\"Compare", "\"[\" sep = \", \" sfx = \"]\" elif \"s\"", "a in range(3)) self.p2 = (self.p2[a] + offset[a] for a", "length(self): \"\"\"Returns the length of the line.\"\"\" return self.p1.distFromPoint(self.p2) class", "def __iter__(self): \"\"\"Iterator generator for endpoints.\"\"\" yield self.p1 yield self.p2", "\"\"\"Given a vertex number, returns a vertex coordinate vector.\"\"\" if", "__format__(self, fmt): \"\"\"Provides .format() support.\"\"\" pfx = \"\" sep =", "in self.endhash: return [] return self.endhash[p] def get(self, p1, p2):", "if p1 < p2 else (p2, p1) if key not", "for a in range(3)) def scale(self,scale): \"\"\"Translate the endpoint's vertices\"\"\"", "= {} def _add_endpoint(self, p, seg): \"\"\"Remember that this segment", "\"\"\"Remember that this segment has a given endpoint\"\"\" if p", "edge vertices\"\"\" oldseghash = self.seghash self.seghash = { (v[0], v[1]):", "= \"\" p1 = self.p1.__format__(fmt) p2 = self.p2.__format__(fmt) return pfx", "endpoints.\"\"\" if p1 > p2: p1, p2 = (p2, p1)", "\"\"\"Initialize as an empty cache.\"\"\" self.endhash = {} self.seghash =", "vertices\"\"\" oldseghash = self.seghash self.seghash = { (v[0], v[1]): v", ".format() support.\"\"\" pfx = \"\" sep = \" - \"", "def __format__(self, fmt): \"\"\"Provides .format() support.\"\"\" pfx = \"\" sep", "generator for endpoints.\"\"\" yield self.p1 yield self.p2 def __getitem__(self, idx):", "== 1: return self.p2 raise LookupError() def __hash__(self): \"\"\"Returns hash", "in range(3)) def scale(self,scale): \"\"\"Translate the endpoint's vertices\"\"\" self.p1 =", "if idx == 1: return self.p2 raise LookupError() def __hash__(self):", "cached LineSegment3D inst, if any.\"\"\" key = (p1, p2) if", "\"\"\"Provides .format() support.\"\"\" pfx = \"\" sep = \" -", "with two endpoints.\"\"\" if p1 > p2: p1, p2 =", "hash value for endpoints\"\"\" return hash((self.p1, self.p2)) def __lt__(self, p):", "v[1]): v for v in oldseghash.values() } oldendhash = self.endhash", "vertex coordinate vector.\"\"\" if idx == 0: return self.p1 if", "return val return self[1].__cmp__(p[1]) def __format__(self, fmt): \"\"\"Provides .format() support.\"\"\"", "__len__(self): \"\"\"Length of sequence.\"\"\" return 
len(self.seghash) # vim: expandtab tabstop=4", "p\"\"\" if p not in self.endhash: return [] return self.endhash[p]", "of all edges.\"\"\" for v in self.seghash.values(): v.translate(offset) self.rehash() def", "= \"[\" sep = \", \" sfx = \"]\" elif", "= self.p1.__format__(fmt) p2 = self.p2.__format__(fmt) return pfx + p1 +", "self.p2 = (self.p2[a] * scale[a] for a in range(3)) def", "def get(self, p1, p2): \"\"\"Given 2 endpoints, return the cached", "def rehash(self): \"\"\"Reset the hashes for changed edge vertices\"\"\" oldseghash", "p2 else (p2, p1) if key not in self.seghash: return", "def __lt__(self, p): return self < p def __cmp__(self, p):", "return 2 def __iter__(self): \"\"\"Iterator generator for endpoints.\"\"\" yield self.p1", "__str__(self): \"\"\"Returns a human readable coordinate string.\"\"\" return \"{0:a}\".format(self) def", "range(3)) self.p2 = (self.p2[a] * scale[a] for a in range(3))", "segment always has two endpoints.\"\"\" return 2 def __iter__(self): \"\"\"Iterator", "heirarchy.\"\"\" val = self[0].__cmp__(p[0]) if val != 0: return val", "not in self.endhash: return [] return self.endhash[p] def get(self, p1,", "return hash((self.p1, self.p2)) def __lt__(self, p): return self < p", "for 3D Line Segments.\"\"\" def __init__(self): \"\"\"Initialize as an empty", "coordinate string.\"\"\" return \"{0:a}\".format(self) def translate(self,offset): \"\"\"Translate the endpoint's vertices\"\"\"", "oldendhash.values() for k in v } def translate(self,offset): \"\"\"Translate vertices", "None return self.seghash[key] def add(self, p1, p2): \"\"\"Given 2 endpoints,", "endpoints.\"\"\" yield self.p1 yield self.p2 def __getitem__(self, idx): \"\"\"Given a", "endpoint's vertices\"\"\" self.p1 = (self.p1[a] + offset[a] for a in", "2 endpoints, return the (new or cached) LineSegment3D inst.\"\"\" key", "LineSegment3DCache(object): \"\"\"Cache class for 3D Line Segments.\"\"\" def __init__(self): \"\"\"Initialize", "if idx == 0: return self.p1 if idx == 1:", "\"\"\"Translate the endpoint's vertices\"\"\" self.p1 = (self.p1[a] + offset[a] for", "the cache.\"\"\" for pt in self.seghash.values(): yield pt def __len__(self):", "p1) self.p1 = p1 self.p2 = p2 self.count = 1", "def length(self): \"\"\"Returns the length of the line.\"\"\" return self.p1.distFromPoint(self.p2)", "endpoints.\"\"\" return 2 def __iter__(self): \"\"\"Iterator generator for endpoints.\"\"\" yield", "\" sfx = \"]\" elif \"s\" in fmt: pfx =", "oldendhash = self.endhash self.endhash = { k: v for v", "in self.seghash: return None return self.seghash[key] def add(self, p1, p2):", "in v } def translate(self,offset): \"\"\"Translate vertices of all edges.\"\"\"", "self.endhash: self.endhash[p] = [] self.endhash[p].append(seg) def rehash(self): \"\"\"Reset the hashes", "LineSegment3D inst, if any.\"\"\" key = (p1, p2) if p1", "seg def __iter__(self): \"\"\"Creates an iterator for the line segments", "return self.p1.distFromPoint(self.p2) class LineSegment3DCache(object): \"\"\"Cache class for 3D Line Segments.\"\"\"", "self.endhash[p].append(seg) def rehash(self): \"\"\"Reset the hashes for changed edge vertices\"\"\"", "__getitem__(self, idx): \"\"\"Given a vertex number, returns a vertex coordinate", "\"\"\"Cache class for 3D Line Segments.\"\"\" def __init__(self): \"\"\"Initialize as", "sfx = \"\" if \"a\" in fmt: pfx = \"[\"", "\"\"\"Creates an iterator for the line segments in the cache.\"\"\"", "class LineSegment3D(object): \"\"\"A class to represent a 3D line segment.\"\"\"", "seg): \"\"\"Remember that 
this segment has a given endpoint\"\"\" if", "an arbitrary heirarchy.\"\"\" val = self[0].__cmp__(p[0]) if val != 0:", "\"\"\"Translate the endpoint's vertices\"\"\" self.p1 = (self.p1[a] * scale[a] for", "return self < p def __cmp__(self, p): \"\"\"Compare points for", "scale[a] for a in range(3)) self.p2 = (self.p2[a] * scale[a]", "= {} self.seghash = {} def _add_endpoint(self, p, seg): \"\"\"Remember", "two endpoints.\"\"\" if p1 > p2: p1, p2 = (p2,", "self.endhash self.endhash = { k: v for v in oldendhash.values()", "p1) if key not in self.seghash: return None return self.seghash[key]", "key = (p1, p2) if p1 < p2 else (p2,", "def translate(self,offset): \"\"\"Translate vertices of all edges.\"\"\" for v in", "return seg seg = LineSegment3D(p1, p2) self.seghash[key] = seg self._add_endpoint(p1,", "\"\"\"Iterator generator for endpoints.\"\"\" yield self.p1 yield self.p2 def __getitem__(self,", "p1, p2 = (p2, p1) self.p1 = p1 self.p2 =", "self._add_endpoint(p2, seg) return seg def __iter__(self): \"\"\"Creates an iterator for", "coordinate vector.\"\"\" if idx == 0: return self.p1 if idx", "def translate(self,offset): \"\"\"Translate the endpoint's vertices\"\"\" self.p1 = (self.p1[a] +", "seg) return seg def __iter__(self): \"\"\"Creates an iterator for the", "translate(self,offset): \"\"\"Translate vertices of all edges.\"\"\" for v in self.seghash.values():", "readable coordinate string.\"\"\" return \"{0:a}\".format(self) def translate(self,offset): \"\"\"Translate the endpoint's", "= LineSegment3D(p1, p2) self.seghash[key] = seg self._add_endpoint(p1, seg) self._add_endpoint(p2, seg)", "v } def translate(self,offset): \"\"\"Translate vertices of all edges.\"\"\" for", "__hash__(self): \"\"\"Returns hash value for endpoints\"\"\" return hash((self.p1, self.p2)) def", "v for v in oldseghash.values() } oldendhash = self.endhash self.endhash", "segment has a given endpoint\"\"\" if p not in self.endhash:", "of the line.\"\"\" return self.p1.distFromPoint(self.p2) class LineSegment3DCache(object): \"\"\"Cache class for", "self.endhash[p] = [] self.endhash[p].append(seg) def rehash(self): \"\"\"Reset the hashes for", "vertices\"\"\" self.p1 = (self.p1[a] * scale[a] for a in range(3))", "a in range(3)) def length(self): \"\"\"Returns the length of the", "return [] return self.endhash[p] def get(self, p1, p2): \"\"\"Given 2", "v.scale(scale) self.rehash() def endpoint_segments(self, p): \"\"\"get list of edges that", "vertices of all edges.\"\"\" for v in self.seghash.values(): v.scale(scale) self.rehash()", "in self.seghash: seg = self.seghash[key] seg.count += 1 return seg", "for v in self.seghash.values(): v.scale(scale) self.rehash() def endpoint_segments(self, p): \"\"\"get", "of sequence.\"\"\" return len(self.seghash) # vim: expandtab tabstop=4 shiftwidth=4 softtabstop=4", "+ p2 + sfx def __repr__(self): \"\"\"Standard string representation.\"\"\" return", "\"\"\"Standard string representation.\"\"\" return \"<LineSegment3D: {0}>\".format(self) def __str__(self): \"\"\"Returns a", "__repr__(self): \"\"\"Standard string representation.\"\"\" return \"<LineSegment3D: {0}>\".format(self) def __str__(self): \"\"\"Returns", "in self.seghash.values(): v.translate(offset) self.rehash() def scale(self,scale): \"\"\"Scale vertices of all", "p2 + sfx def __repr__(self): \"\"\"Standard string representation.\"\"\" return \"<LineSegment3D:", "(self.p2[a] * scale[a] for a in range(3)) def length(self): \"\"\"Returns", "LineSegment3D(object): \"\"\"A class to represent a 3D line segment.\"\"\" 
def", "in oldseghash.values() } oldendhash = self.endhash self.endhash = { k:", "return pfx + p1 + sep + p2 + sfx", "for changed edge vertices\"\"\" oldseghash = self.seghash self.seghash = {", "\"\"\"Length of sequence.\"\"\" return len(self.seghash) # vim: expandtab tabstop=4 shiftwidth=4", "an empty cache.\"\"\" self.endhash = {} self.seghash = {} def", "pfx = \"\" sep = \" \" sfx = \"\"", "a in range(3)) self.p2 = (self.p2[a] * scale[a] for a", "1: return self.p2 raise LookupError() def __hash__(self): \"\"\"Returns hash value", "1 return seg seg = LineSegment3D(p1, p2) self.seghash[key] = seg", "iterator for the line segments in the cache.\"\"\" for pt", "for endpoints\"\"\" return hash((self.p1, self.p2)) def __lt__(self, p): return self", "offset[a] for a in range(3)) self.p2 = (self.p2[a] + offset[a]", "(new or cached) LineSegment3D inst.\"\"\" key = (p1, p2) if", "\" \" sfx = \"\" p1 = self.p1.__format__(fmt) p2 =", "key in self.seghash: seg = self.seghash[key] seg.count += 1 return", "= (self.p1[a] + offset[a] for a in range(3)) self.p2 =", "return None return self.seghash[key] def add(self, p1, p2): \"\"\"Given 2", "sep + p2 + sfx def __repr__(self): \"\"\"Standard string representation.\"\"\"", "self.endhash: return [] return self.endhash[p] def get(self, p1, p2): \"\"\"Given", "in range(3)) def length(self): \"\"\"Returns the length of the line.\"\"\"", "the length of the line.\"\"\" return self.p1.distFromPoint(self.p2) class LineSegment3DCache(object): \"\"\"Cache", "cache.\"\"\" for pt in self.seghash.values(): yield pt def __len__(self): \"\"\"Length", "def __iter__(self): \"\"\"Creates an iterator for the line segments in", "self.rehash() def scale(self,scale): \"\"\"Scale vertices of all edges.\"\"\" for v", "self.seghash = {} def _add_endpoint(self, p, seg): \"\"\"Remember that this", "sort ordering in an arbitrary heirarchy.\"\"\" val = self[0].__cmp__(p[0]) if", "self.p1 if idx == 1: return self.p2 raise LookupError() def", "for the line segments in the cache.\"\"\" for pt in", "seg) self._add_endpoint(p2, seg) return seg def __iter__(self): \"\"\"Creates an iterator", "number, returns a vertex coordinate vector.\"\"\" if idx == 0:", "p): return self < p def __cmp__(self, p): \"\"\"Compare points", "end at point p\"\"\" if p not in self.endhash: return", "p2: p1, p2 = (p2, p1) self.p1 = p1 self.p2", "self.p1 yield self.p2 def __getitem__(self, idx): \"\"\"Given a vertex number,", "the endpoint's vertices\"\"\" self.p1 = (self.p1[a] + offset[a] for a", "+ p1 + sep + p2 + sfx def __repr__(self):", "sfx def __repr__(self): \"\"\"Standard string representation.\"\"\" return \"<LineSegment3D: {0}>\".format(self) def", "self.p2 = (self.p2[a] + offset[a] for a in range(3)) def", "return the cached LineSegment3D inst, if any.\"\"\" key = (p1,", "key not in self.seghash: return None return self.seghash[key] def add(self,", "return seg def __iter__(self): \"\"\"Creates an iterator for the line", "> p2: p1, p2 = (p2, p1) self.p1 = p1", "cache.\"\"\" self.endhash = {} self.seghash = {} def _add_endpoint(self, p,", "__iter__(self): \"\"\"Creates an iterator for the line segments in the", "(p2, p1) self.p1 = p1 self.p2 = p2 self.count =", "p1 = self.p1.__format__(fmt) p2 = self.p2.__format__(fmt) return pfx + p1", "self < p def __cmp__(self, p): \"\"\"Compare points for sort", "p2): \"\"\"Given 2 endpoints, return the (new or cached) LineSegment3D", "value for endpoints\"\"\" return hash((self.p1, self.p2)) def __lt__(self, p): return", "+ offset[a] for a in range(3)) self.p2 = 
(self.p2[a] +", "def scale(self,scale): \"\"\"Scale vertices of all edges.\"\"\" for v in", "(self.p2[a] + offset[a] for a in range(3)) def scale(self,scale): \"\"\"Translate", "to represent a 3D line segment.\"\"\" def __init__(self, p1, p2):", "\", \" sfx = \"]\" elif \"s\" in fmt: pfx", "p1 < p2 else (p2, p1) if key not in", "if p not in self.endhash: self.endhash[p] = [] self.endhash[p].append(seg) def", "for pt in self.seghash.values(): yield pt def __len__(self): \"\"\"Length of", "sep = \" - \" sfx = \"\" if \"a\"", "__init__(self, p1, p2): \"\"\"Initialize with two endpoints.\"\"\" if p1 >", "for endpoints.\"\"\" yield self.p1 yield self.p2 def __getitem__(self, idx): \"\"\"Given", "< p2 else (p2, p1) if key in self.seghash: seg", "edges that end at point p\"\"\" if p not in", "self._add_endpoint(p1, seg) self._add_endpoint(p2, seg) return seg def __iter__(self): \"\"\"Creates an", "yield self.p1 yield self.p2 def __getitem__(self, idx): \"\"\"Given a vertex", "= \"\" if \"a\" in fmt: pfx = \"[\" sep", "points for sort ordering in an arbitrary heirarchy.\"\"\" val =", "+= 1 return seg seg = LineSegment3D(p1, p2) self.seghash[key] =", "\"a\" in fmt: pfx = \"[\" sep = \", \"", "fmt: pfx = \"[\" sep = \", \" sfx =", "of edges that end at point p\"\"\" if p not", "self.count = 1 def __len__(self): \"\"\"Line segment always has two", "return self.seghash[key] def add(self, p1, p2): \"\"\"Given 2 endpoints, return", "p1, p2): \"\"\"Given 2 endpoints, return the (new or cached)", "if any.\"\"\" key = (p1, p2) if p1 < p2", "- \" sfx = \"\" if \"a\" in fmt: pfx", "= (p2, p1) self.p1 = p1 self.p2 = p2 self.count", "= { k: v for v in oldendhash.values() for k", "a given endpoint\"\"\" if p not in self.endhash: self.endhash[p] =", "in fmt: pfx = \"[\" sep = \", \" sfx", "return the (new or cached) LineSegment3D inst.\"\"\" key = (p1,", "p2 = (p2, p1) self.p1 = p1 self.p2 = p2", "add(self, p1, p2): \"\"\"Given 2 endpoints, return the (new or", "endpoints\"\"\" return hash((self.p1, self.p2)) def __lt__(self, p): return self <", "= self.p2.__format__(fmt) return pfx + p1 + sep + p2", "\"\"\"Line segment always has two endpoints.\"\"\" return 2 def __iter__(self):", "< p def __cmp__(self, p): \"\"\"Compare points for sort ordering", "== 0: return self.p1 if idx == 1: return self.p2", "not in self.endhash: self.endhash[p] = [] self.endhash[p].append(seg) def rehash(self): \"\"\"Reset", "p def __cmp__(self, p): \"\"\"Compare points for sort ordering in", "a vertex coordinate vector.\"\"\" if idx == 0: return self.p1", "(v[0], v[1]): v for v in oldseghash.values() } oldendhash =", "\"\"\"Translate vertices of all edges.\"\"\" for v in self.seghash.values(): v.translate(offset)", "v.translate(offset) self.rehash() def scale(self,scale): \"\"\"Scale vertices of all edges.\"\"\" for", "= 1 def __len__(self): \"\"\"Line segment always has two endpoints.\"\"\"", "self.endhash[p] def get(self, p1, p2): \"\"\"Given 2 endpoints, return the", "at point p\"\"\" if p not in self.endhash: return []", "{ k: v for v in oldendhash.values() for k in", "def __cmp__(self, p): \"\"\"Compare points for sort ordering in an", "endpoint_segments(self, p): \"\"\"get list of edges that end at point", "(p1, p2) if p1 < p2 else (p2, p1) if", "p, seg): \"\"\"Remember that this segment has a given endpoint\"\"\"", "\"\"\"Returns the length of the line.\"\"\" return self.p1.distFromPoint(self.p2) class LineSegment3DCache(object):", "length of the line.\"\"\" return self.p1.distFromPoint(self.p2) class LineSegment3DCache(object): 
\"\"\"Cache class", "\"\" if \"a\" in fmt: pfx = \"[\" sep =", "p): \"\"\"get list of edges that end at point p\"\"\"", "self.seghash.values(): yield pt def __len__(self): \"\"\"Length of sequence.\"\"\" return len(self.seghash)", "[] return self.endhash[p] def get(self, p1, p2): \"\"\"Given 2 endpoints,", "if key in self.seghash: seg = self.seghash[key] seg.count += 1", "that this segment has a given endpoint\"\"\" if p not", "cached) LineSegment3D inst.\"\"\" key = (p1, p2) if p1 <", "idx): \"\"\"Given a vertex number, returns a vertex coordinate vector.\"\"\"", "p2): \"\"\"Given 2 endpoints, return the cached LineSegment3D inst, if", "\"\"\"Returns hash value for endpoints\"\"\" return hash((self.p1, self.p2)) def __lt__(self,", "p2 self.count = 1 def __len__(self): \"\"\"Line segment always has", "__cmp__(self, p): \"\"\"Compare points for sort ordering in an arbitrary", "return \"{0:a}\".format(self) def translate(self,offset): \"\"\"Translate the endpoint's vertices\"\"\" self.p1 =", "endpoints, return the cached LineSegment3D inst, if any.\"\"\" key =", "seg = self.seghash[key] seg.count += 1 return seg seg =", "p2): \"\"\"Initialize with two endpoints.\"\"\" if p1 > p2: p1,", "Line Segments.\"\"\" def __init__(self): \"\"\"Initialize as an empty cache.\"\"\" self.endhash", "p2) self.seghash[key] = seg self._add_endpoint(p1, seg) self._add_endpoint(p2, seg) return seg", "p1, p2): \"\"\"Initialize with two endpoints.\"\"\" if p1 > p2:", "0: return self.p1 if idx == 1: return self.p2 raise", "= [] self.endhash[p].append(seg) def rehash(self): \"\"\"Reset the hashes for changed", "LookupError() def __hash__(self): \"\"\"Returns hash value for endpoints\"\"\" return hash((self.p1,", "__init__(self): \"\"\"Initialize as an empty cache.\"\"\" self.endhash = {} self.seghash", "= self.endhash self.endhash = { k: v for v in", "pfx + p1 + sep + p2 + sfx def", "edges.\"\"\" for v in self.seghash.values(): v.translate(offset) self.rehash() def scale(self,scale): \"\"\"Scale", "idx == 1: return self.p2 raise LookupError() def __hash__(self): \"\"\"Returns", "changed edge vertices\"\"\" oldseghash = self.seghash self.seghash = { (v[0],", "1 def __len__(self): \"\"\"Line segment always has two endpoints.\"\"\" return", "= \" - \" sfx = \"\" if \"a\" in", "seg seg = LineSegment3D(p1, p2) self.seghash[key] = seg self._add_endpoint(p1, seg)", "range(3)) def scale(self,scale): \"\"\"Translate the endpoint's vertices\"\"\" self.p1 = (self.p1[a]", "string.\"\"\" return \"{0:a}\".format(self) def translate(self,offset): \"\"\"Translate the endpoint's vertices\"\"\" self.p1", "__len__(self): \"\"\"Line segment always has two endpoints.\"\"\" return 2 def", "def add(self, p1, p2): \"\"\"Given 2 endpoints, return the (new", "k in v } def translate(self,offset): \"\"\"Translate vertices of all", "p not in self.endhash: self.endhash[p] = [] self.endhash[p].append(seg) def rehash(self):", "rehash(self): \"\"\"Reset the hashes for changed edge vertices\"\"\" oldseghash =", "sfx = \"\" p1 = self.p1.__format__(fmt) p2 = self.p2.__format__(fmt) return", "= self[0].__cmp__(p[0]) if val != 0: return val return self[1].__cmp__(p[1])", "elif \"s\" in fmt: pfx = \"\" sep = \"", "self.p2 raise LookupError() def __hash__(self): \"\"\"Returns hash value for endpoints\"\"\"", "the line segments in the cache.\"\"\" for pt in self.seghash.values():", "} oldendhash = self.endhash self.endhash = { k: v for", "in self.seghash.values(): yield pt def __len__(self): \"\"\"Length of sequence.\"\"\" return", "pfx = \"\" sep = \" - 
\" sfx =", "representation.\"\"\" return \"<LineSegment3D: {0}>\".format(self) def __str__(self): \"\"\"Returns a human readable", "sfx = \"]\" elif \"s\" in fmt: pfx = \"\"", "in self.endhash: self.endhash[p] = [] self.endhash[p].append(seg) def rehash(self): \"\"\"Reset the", "self.seghash.values(): v.scale(scale) self.rehash() def endpoint_segments(self, p): \"\"\"get list of edges", "{} self.seghash = {} def _add_endpoint(self, p, seg): \"\"\"Remember that", "always has two endpoints.\"\"\" return 2 def __iter__(self): \"\"\"Iterator generator", "the hashes for changed edge vertices\"\"\" oldseghash = self.seghash self.seghash", "p1, p2): \"\"\"Given 2 endpoints, return the cached LineSegment3D inst,", "range(3)) self.p2 = (self.p2[a] + offset[a] for a in range(3))", "= (self.p2[a] * scale[a] for a in range(3)) def length(self):", "segments in the cache.\"\"\" for pt in self.seghash.values(): yield pt", "def __len__(self): \"\"\"Line segment always has two endpoints.\"\"\" return 2", "self[0].__cmp__(p[0]) if val != 0: return val return self[1].__cmp__(p[1]) def", "v for v in oldendhash.values() for k in v }", "v in self.seghash.values(): v.translate(offset) self.rehash() def scale(self,scale): \"\"\"Scale vertices of", "return self.p2 raise LookupError() def __hash__(self): \"\"\"Returns hash value for", "that end at point p\"\"\" if p not in self.endhash:", "0: return val return self[1].__cmp__(p[1]) def __format__(self, fmt): \"\"\"Provides .format()", "p1 > p2: p1, p2 = (p2, p1) self.p1 =", "human readable coordinate string.\"\"\" return \"{0:a}\".format(self) def translate(self,offset): \"\"\"Translate the", "line segment.\"\"\" def __init__(self, p1, p2): \"\"\"Initialize with two endpoints.\"\"\"", "v in self.seghash.values(): v.scale(scale) self.rehash() def endpoint_segments(self, p): \"\"\"get list", "= self.seghash[key] seg.count += 1 return seg seg = LineSegment3D(p1,", "segment.\"\"\" def __init__(self, p1, p2): \"\"\"Initialize with two endpoints.\"\"\" if", "inst.\"\"\" key = (p1, p2) if p1 < p2 else", "an iterator for the line segments in the cache.\"\"\" for", "!= 0: return val return self[1].__cmp__(p[1]) def __format__(self, fmt): \"\"\"Provides", "oldseghash = self.seghash self.seghash = { (v[0], v[1]): v for", "val return self[1].__cmp__(p[1]) def __format__(self, fmt): \"\"\"Provides .format() support.\"\"\" pfx", "class LineSegment3DCache(object): \"\"\"Cache class for 3D Line Segments.\"\"\" def __init__(self):", "else (p2, p1) if key not in self.seghash: return None", "\" sfx = \"\" p1 = self.p1.__format__(fmt) p2 = self.p2.__format__(fmt)", "p2 = self.p2.__format__(fmt) return pfx + p1 + sep +", "p1 + sep + p2 + sfx def __repr__(self): \"\"\"Standard", "(self.p1[a] * scale[a] for a in range(3)) self.p2 = (self.p2[a]", "sep = \" \" sfx = \"\" p1 = self.p1.__format__(fmt)", "the (new or cached) LineSegment3D inst.\"\"\" key = (p1, p2)", "self.p2 = p2 self.count = 1 def __len__(self): \"\"\"Line segment", "\"\" sep = \" \" sfx = \"\" p1 =", "vertex number, returns a vertex coordinate vector.\"\"\" if idx ==", "range(3)) def length(self): \"\"\"Returns the length of the line.\"\"\" return", "p1) if key in self.seghash: seg = self.seghash[key] seg.count +=", "list of edges that end at point p\"\"\" if p", "_add_endpoint(self, p, seg): \"\"\"Remember that this segment has a given", "vector.\"\"\" if idx == 0: return self.p1 if idx ==", "has a given endpoint\"\"\" if p not in self.endhash: self.endhash[p]", "= \"]\" elif \"s\" in fmt: pfx = \"\" sep", "if \"a\" in 
fmt: pfx = \"[\" sep = \",", "def __len__(self): \"\"\"Length of sequence.\"\"\" return len(self.seghash) # vim: expandtab", "k: v for v in oldendhash.values() for k in v", "arbitrary heirarchy.\"\"\" val = self[0].__cmp__(p[0]) if val != 0: return", "\"\"\"A class to represent a 3D line segment.\"\"\" def __init__(self,", "[] self.endhash[p].append(seg) def rehash(self): \"\"\"Reset the hashes for changed edge", "scale(self,scale): \"\"\"Translate the endpoint's vertices\"\"\" self.p1 = (self.p1[a] * scale[a]", "scale[a] for a in range(3)) def length(self): \"\"\"Returns the length", "= seg self._add_endpoint(p1, seg) self._add_endpoint(p2, seg) return seg def __iter__(self):", "yield self.p2 def __getitem__(self, idx): \"\"\"Given a vertex number, returns", "v in oldseghash.values() } oldendhash = self.endhash self.endhash = {", "p2 else (p2, p1) if key in self.seghash: seg =", "Segments.\"\"\" def __init__(self): \"\"\"Initialize as an empty cache.\"\"\" self.endhash =", "if p1 < p2 else (p2, p1) if key in", "translate(self,offset): \"\"\"Translate the endpoint's vertices\"\"\" self.p1 = (self.p1[a] + offset[a]", "v in oldendhash.values() for k in v } def translate(self,offset):", "given endpoint\"\"\" if p not in self.endhash: self.endhash[p] = []", "p2) if p1 < p2 else (p2, p1) if key", "self.p2 def __getitem__(self, idx): \"\"\"Given a vertex number, returns a", "in self.seghash.values(): v.scale(scale) self.rehash() def endpoint_segments(self, p): \"\"\"get list of", "fmt): \"\"\"Provides .format() support.\"\"\" pfx = \"\" sep = \"" ]
[ "if table_name: self._meta.db_table = table_name self._meta.database.initialize(db_connection) def multiget(self, keys): cls", "multiget(self, keys): cls = self.__class__ res = self.select(cls.key, cls.value) \\", "= self.__class__ res = self.select(cls.key, cls.value) \\ .where(cls.key << keys", "unique=True) value = playhouse.kv.JSONField() mtime = peewee.IntegerField(default=time) ttl = peewee.IntegerField(default=0)", "{ k[len(prefix):] : v for k, v in res.items() }", "key, value in data.items(): kvs.append({ cls.key : key, cls.value :", "list(self, prefix=None, limit=None): cls = self.__class__ q = self.select(cls.key, cls.value)", "= [ key_or_keys ] cls = self.__class__ peewee.DeleteQuery(cls).where(cls.key << keys).execute()", "ttl = peewee.IntegerField(default=0) class Meta: database = peewee.Proxy() def __init__(self,", "data.items(): kvs.append({ cls.key : key, cls.value : value, cls.mtime :", "<< keys).execute() def list(self, prefix=None, limit=None): cls = self.__class__ q", "= self.__class__ return cls.ttl == 0 | (cls.mtime + cls.ttl", "ts = int(time()) cls = self.__class__ return cls.ttl == 0", "for k, v in res.items() } return res def __ttl_filter(self):", "ttl or 0 kvs = [] for key, value in", "res = self.select(cls.key, cls.value) \\ .where(cls.key << keys & self.__ttl_filter())", "import CacheableAdapter class PeeweeAdapter(CacheableAdapter, peewee.Model): key = peewee.CharField(max_length=256, unique=True) value", "x[0] : x[1] for x in q.tuples() } if prefix:", "= ttl or 0 kvs = [] for key, value", "import time from . import CacheableAdapter class PeeweeAdapter(CacheableAdapter, peewee.Model): key", "to filter out stale results \"\"\" ts = int(time()) cls", "value = playhouse.kv.JSONField() mtime = peewee.IntegerField(default=time) ttl = peewee.IntegerField(default=0) class", "= int(time()) ttl = ttl or 0 kvs = []", "clause to a query, to filter out stale results \"\"\"", "stale results \"\"\" ts = int(time()) cls = self.__class__ return", "kvs.append({ cls.key : key, cls.value : value, cls.mtime : ts,", "('%s%s' % (prefix, wildcard))) q = q.where(self.__ttl_filter()) if limit: q", "peewee.SqliteDatabase: wildcard = '*' else: wildcard = '%' q =", "cls = self.__class__ res = self.select(cls.key, cls.value) \\ .where(cls.key <<", "self.select(cls.key, cls.value) \\ .where(cls.key << keys & self.__ttl_filter()) \\ .tuples()", "else: wildcard = '%' q = q.where(cls.key % ('%s%s' %", "in res } @classmethod def multiset(cls, data, ttl=None): ts =", "Meta: database = peewee.Proxy() def __init__(self, db_connection, table_name=None): if table_name:", "res = { k[len(prefix):] : v for k, v in", "int(time()) cls = self.__class__ return cls.ttl == 0 | (cls.mtime", "__init__(self, db_connection, table_name=None): if table_name: self._meta.db_table = table_name self._meta.database.initialize(db_connection) def", "{ x[0] : x[1] for x in res } @classmethod", "= q.where(cls.key % ('%s%s' % (prefix, wildcard))) q = q.where(self.__ttl_filter())", "q.where(cls.key % ('%s%s' % (prefix, wildcard))) q = q.where(self.__ttl_filter()) if", "for x in q.tuples() } if prefix: res = {", "cls.value) \\ .where(cls.key << keys & self.__ttl_filter()) \\ .tuples() return", "def __ttl_filter(self): \"\"\" Add the TTL where clause to a", "= peewee.IntegerField(default=0) class Meta: database = peewee.Proxy() def __init__(self, db_connection,", "playhouse.kv from time import time from . 
import CacheableAdapter class", "}) cls.insert_many(kvs).upsert().execute() def delete(self, key_or_keys): if list == type(key_or_keys): keys", "self.__class__ res = self.select(cls.key, cls.value) \\ .where(cls.key << keys &", "cls.ttl : ttl, }) cls.insert_many(kvs).upsert().execute() def delete(self, key_or_keys): if list", "cls.value : value, cls.mtime : ts, cls.ttl : ttl, })", "prefix: res = { k[len(prefix):] : v for k, v", "key, cls.value : value, cls.mtime : ts, cls.ttl : ttl,", "keys = [ key_or_keys ] cls = self.__class__ peewee.DeleteQuery(cls).where(cls.key <<", "def delete(self, key_or_keys): if list == type(key_or_keys): keys = key_or_keys", "in q.tuples() } if prefix: res = { k[len(prefix):] :", "limit=None): cls = self.__class__ q = self.select(cls.key, cls.value) if prefix:", "self.__class__ return cls.ttl == 0 | (cls.mtime + cls.ttl >", "in data.items(): kvs.append({ cls.key : key, cls.value : value, cls.mtime", "if prefix: if self.__db_type() == peewee.SqliteDatabase: wildcard = '*' else:", "\"\"\" Add the TTL where clause to a query, to", "database = peewee.Proxy() def __init__(self, db_connection, table_name=None): if table_name: self._meta.db_table", "\"\"\" ts = int(time()) cls = self.__class__ return cls.ttl ==", "cls.mtime : ts, cls.ttl : ttl, }) cls.insert_many(kvs).upsert().execute() def delete(self,", "x in q.tuples() } if prefix: res = { k[len(prefix):]", "ts, cls.ttl : ttl, }) cls.insert_many(kvs).upsert().execute() def delete(self, key_or_keys): if", "to a query, to filter out stale results \"\"\" ts", "k[len(prefix):] : v for k, v in res.items() } return", "% ('%s%s' % (prefix, wildcard))) q = q.where(self.__ttl_filter()) if limit:", "key_or_keys ] cls = self.__class__ peewee.DeleteQuery(cls).where(cls.key << keys).execute() def list(self,", "cls.key : key, cls.value : value, cls.mtime : ts, cls.ttl", "PeeweeAdapter(CacheableAdapter, peewee.Model): key = peewee.CharField(max_length=256, unique=True) value = playhouse.kv.JSONField() mtime", "def __init__(self, db_connection, table_name=None): if table_name: self._meta.db_table = table_name self._meta.database.initialize(db_connection)", "\\ .tuples() return { x[0] : x[1] for x in", "peewee.IntegerField(default=time) ttl = peewee.IntegerField(default=0) class Meta: database = peewee.Proxy() def", "= self.select(cls.key, cls.value) \\ .where(cls.key << keys & self.__ttl_filter()) \\", "= '*' else: wildcard = '%' q = q.where(cls.key %", "or 0 kvs = [] for key, value in data.items():", "cls = self.__class__ q = self.select(cls.key, cls.value) if prefix: if", "TTL where clause to a query, to filter out stale", "q = self.select(cls.key, cls.value) if prefix: if self.__db_type() == peewee.SqliteDatabase:", "q.tuples() } if prefix: res = { k[len(prefix):] : v", "peewee.IntegerField(default=0) class Meta: database = peewee.Proxy() def __init__(self, db_connection, table_name=None):", "= { k[len(prefix):] : v for k, v in res.items()", "wildcard))) q = q.where(self.__ttl_filter()) if limit: q = q.limit(limit) res", "value, cls.mtime : ts, cls.ttl : ttl, }) cls.insert_many(kvs).upsert().execute() def", "self._meta.db_table = table_name self._meta.database.initialize(db_connection) def multiget(self, keys): cls = self.__class__", "= self.__class__ q = self.select(cls.key, cls.value) if prefix: if self.__db_type()", "list == type(key_or_keys): keys = key_or_keys else: keys = [", ". 
import CacheableAdapter class PeeweeAdapter(CacheableAdapter, peewee.Model): key = peewee.CharField(max_length=256, unique=True)", "q.limit(limit) res = { x[0] : x[1] for x in", "self.__class__ q = self.select(cls.key, cls.value) if prefix: if self.__db_type() ==", "if list == type(key_or_keys): keys = key_or_keys else: keys =", "cls.ttl == 0 | (cls.mtime + cls.ttl > ts) def", "results \"\"\" ts = int(time()) cls = self.__class__ return cls.ttl", "== type(key_or_keys): keys = key_or_keys else: keys = [ key_or_keys", "res def __ttl_filter(self): \"\"\" Add the TTL where clause to", "& self.__ttl_filter()) \\ .tuples() return { x[0] : x[1] for", "= peewee.Proxy() def __init__(self, db_connection, table_name=None): if table_name: self._meta.db_table =", "= q.where(self.__ttl_filter()) if limit: q = q.limit(limit) res = {", "int(time()) ttl = ttl or 0 kvs = [] for", "res.items() } return res def __ttl_filter(self): \"\"\" Add the TTL", "ts = int(time()) ttl = ttl or 0 kvs =", "key = peewee.CharField(max_length=256, unique=True) value = playhouse.kv.JSONField() mtime = peewee.IntegerField(default=time)", "limit: q = q.limit(limit) res = { x[0] : x[1]", "wildcard = '*' else: wildcard = '%' q = q.where(cls.key", "<reponame>d1hotpep/cacheable import peewee import playhouse.kv from time import time from", "v for k, v in res.items() } return res def", "x in res } @classmethod def multiset(cls, data, ttl=None): ts", "\\ .where(cls.key << keys & self.__ttl_filter()) \\ .tuples() return {", "[] for key, value in data.items(): kvs.append({ cls.key : key,", "table_name self._meta.database.initialize(db_connection) def multiget(self, keys): cls = self.__class__ res =", "'*' else: wildcard = '%' q = q.where(cls.key % ('%s%s'", "= q.limit(limit) res = { x[0] : x[1] for x", "(prefix, wildcard))) q = q.where(self.__ttl_filter()) if limit: q = q.limit(limit)", "where clause to a query, to filter out stale results", "for x in res } @classmethod def multiset(cls, data, ttl=None):", "from time import time from . 
import CacheableAdapter class PeeweeAdapter(CacheableAdapter,", "peewee.Model): key = peewee.CharField(max_length=256, unique=True) value = playhouse.kv.JSONField() mtime =", "CacheableAdapter class PeeweeAdapter(CacheableAdapter, peewee.Model): key = peewee.CharField(max_length=256, unique=True) value =", "else: keys = [ key_or_keys ] cls = self.__class__ peewee.DeleteQuery(cls).where(cls.key", "key_or_keys else: keys = [ key_or_keys ] cls = self.__class__", "cls.value) if prefix: if self.__db_type() == peewee.SqliteDatabase: wildcard = '*'", "0 | (cls.mtime + cls.ttl > ts) def __db_type(self): return", "keys = key_or_keys else: keys = [ key_or_keys ] cls", "return cls.ttl == 0 | (cls.mtime + cls.ttl > ts)", "self.__ttl_filter()) \\ .tuples() return { x[0] : x[1] for x", "def list(self, prefix=None, limit=None): cls = self.__class__ q = self.select(cls.key,", "key_or_keys): if list == type(key_or_keys): keys = key_or_keys else: keys", ": key, cls.value : value, cls.mtime : ts, cls.ttl :", "= [] for key, value in data.items(): kvs.append({ cls.key :", "keys).execute() def list(self, prefix=None, limit=None): cls = self.__class__ q =", "if prefix: res = { k[len(prefix):] : v for k,", "delete(self, key_or_keys): if list == type(key_or_keys): keys = key_or_keys else:", "peewee.DeleteQuery(cls).where(cls.key << keys).execute() def list(self, prefix=None, limit=None): cls = self.__class__", "filter out stale results \"\"\" ts = int(time()) cls =", "= playhouse.kv.JSONField() mtime = peewee.IntegerField(default=time) ttl = peewee.IntegerField(default=0) class Meta:", "def multiget(self, keys): cls = self.__class__ res = self.select(cls.key, cls.value)", "table_name: self._meta.db_table = table_name self._meta.database.initialize(db_connection) def multiget(self, keys): cls =", "return { x[0] : x[1] for x in res }", "peewee.Proxy() def __init__(self, db_connection, table_name=None): if table_name: self._meta.db_table = table_name", "multiset(cls, data, ttl=None): ts = int(time()) ttl = ttl or", "wildcard = '%' q = q.where(cls.key % ('%s%s' % (prefix,", ": x[1] for x in q.tuples() } if prefix: res", "== 0 | (cls.mtime + cls.ttl > ts) def __db_type(self):", "= peewee.IntegerField(default=time) ttl = peewee.IntegerField(default=0) class Meta: database = peewee.Proxy()", "ttl, }) cls.insert_many(kvs).upsert().execute() def delete(self, key_or_keys): if list == type(key_or_keys):", "= '%' q = q.where(cls.key % ('%s%s' % (prefix, wildcard)))", "= { x[0] : x[1] for x in q.tuples() }", "def multiset(cls, data, ttl=None): ts = int(time()) ttl = ttl", "__ttl_filter(self): \"\"\" Add the TTL where clause to a query,", "db_connection, table_name=None): if table_name: self._meta.db_table = table_name self._meta.database.initialize(db_connection) def multiget(self,", "mtime = peewee.IntegerField(default=time) ttl = peewee.IntegerField(default=0) class Meta: database =", ".tuples() return { x[0] : x[1] for x in res", ": ts, cls.ttl : ttl, }) cls.insert_many(kvs).upsert().execute() def delete(self, key_or_keys):", "= self.select(cls.key, cls.value) if prefix: if self.__db_type() == peewee.SqliteDatabase: wildcard", "q = q.where(cls.key % ('%s%s' % (prefix, wildcard))) q =", ": value, cls.mtime : ts, cls.ttl : ttl, }) cls.insert_many(kvs).upsert().execute()", "from . 
import CacheableAdapter class PeeweeAdapter(CacheableAdapter, peewee.Model): key = peewee.CharField(max_length=256,", "= table_name self._meta.database.initialize(db_connection) def multiget(self, keys): cls = self.__class__ res", "q.where(self.__ttl_filter()) if limit: q = q.limit(limit) res = { x[0]", "ttl=None): ts = int(time()) ttl = ttl or 0 kvs", "a query, to filter out stale results \"\"\" ts =", "peewee.CharField(max_length=256, unique=True) value = playhouse.kv.JSONField() mtime = peewee.IntegerField(default=time) ttl =", "kvs = [] for key, value in data.items(): kvs.append({ cls.key", "<< keys & self.__ttl_filter()) \\ .tuples() return { x[0] :", ": x[1] for x in res } @classmethod def multiset(cls,", "0 kvs = [] for key, value in data.items(): kvs.append({", "value in data.items(): kvs.append({ cls.key : key, cls.value : value,", "= self.__class__ peewee.DeleteQuery(cls).where(cls.key << keys).execute() def list(self, prefix=None, limit=None): cls", ".where(cls.key << keys & self.__ttl_filter()) \\ .tuples() return { x[0]", "query, to filter out stale results \"\"\" ts = int(time())", "| (cls.mtime + cls.ttl > ts) def __db_type(self): return type(self._meta.database.obj)", "q = q.where(self.__ttl_filter()) if limit: q = q.limit(limit) res =", "peewee import playhouse.kv from time import time from . import", "} if prefix: res = { k[len(prefix):] : v for", "self.__class__ peewee.DeleteQuery(cls).where(cls.key << keys).execute() def list(self, prefix=None, limit=None): cls =", "import peewee import playhouse.kv from time import time from .", "return res def __ttl_filter(self): \"\"\" Add the TTL where clause", "self.__db_type() == peewee.SqliteDatabase: wildcard = '*' else: wildcard = '%'", "time import time from . import CacheableAdapter class PeeweeAdapter(CacheableAdapter, peewee.Model):", "keys): cls = self.__class__ res = self.select(cls.key, cls.value) \\ .where(cls.key", "playhouse.kv.JSONField() mtime = peewee.IntegerField(default=time) ttl = peewee.IntegerField(default=0) class Meta: database", "res } @classmethod def multiset(cls, data, ttl=None): ts = int(time())", "for key, value in data.items(): kvs.append({ cls.key : key, cls.value", "import playhouse.kv from time import time from . import CacheableAdapter", "class Meta: database = peewee.Proxy() def __init__(self, db_connection, table_name=None): if", "cls.insert_many(kvs).upsert().execute() def delete(self, key_or_keys): if list == type(key_or_keys): keys =", "'%' q = q.where(cls.key % ('%s%s' % (prefix, wildcard))) q", "type(key_or_keys): keys = key_or_keys else: keys = [ key_or_keys ]", "if limit: q = q.limit(limit) res = { x[0] :", "= int(time()) cls = self.__class__ return cls.ttl == 0 |", "[ key_or_keys ] cls = self.__class__ peewee.DeleteQuery(cls).where(cls.key << keys).execute() def", "Add the TTL where clause to a query, to filter", "prefix: if self.__db_type() == peewee.SqliteDatabase: wildcard = '*' else: wildcard", ": v for k, v in res.items() } return res", "res = { x[0] : x[1] for x in q.tuples()", "x[1] for x in res } @classmethod def multiset(cls, data,", "data, ttl=None): ts = int(time()) ttl = ttl or 0", "time from . 
import CacheableAdapter class PeeweeAdapter(CacheableAdapter, peewee.Model): key =", "= key_or_keys else: keys = [ key_or_keys ] cls =", "cls = self.__class__ peewee.DeleteQuery(cls).where(cls.key << keys).execute() def list(self, prefix=None, limit=None):", "} return res def __ttl_filter(self): \"\"\" Add the TTL where", "class PeeweeAdapter(CacheableAdapter, peewee.Model): key = peewee.CharField(max_length=256, unique=True) value = playhouse.kv.JSONField()", "== peewee.SqliteDatabase: wildcard = '*' else: wildcard = '%' q", "@classmethod def multiset(cls, data, ttl=None): ts = int(time()) ttl =", "{ x[0] : x[1] for x in q.tuples() } if", "x[1] for x in q.tuples() } if prefix: res =", "table_name=None): if table_name: self._meta.db_table = table_name self._meta.database.initialize(db_connection) def multiget(self, keys):", ": ttl, }) cls.insert_many(kvs).upsert().execute() def delete(self, key_or_keys): if list ==", "ttl = ttl or 0 kvs = [] for key,", "prefix=None, limit=None): cls = self.__class__ q = self.select(cls.key, cls.value) if", "if self.__db_type() == peewee.SqliteDatabase: wildcard = '*' else: wildcard =", "self.select(cls.key, cls.value) if prefix: if self.__db_type() == peewee.SqliteDatabase: wildcard =", "% (prefix, wildcard))) q = q.where(self.__ttl_filter()) if limit: q =", "the TTL where clause to a query, to filter out", "cls = self.__class__ return cls.ttl == 0 | (cls.mtime +", "v in res.items() } return res def __ttl_filter(self): \"\"\" Add", "out stale results \"\"\" ts = int(time()) cls = self.__class__", "keys & self.__ttl_filter()) \\ .tuples() return { x[0] : x[1]", "q = q.limit(limit) res = { x[0] : x[1] for", "k, v in res.items() } return res def __ttl_filter(self): \"\"\"", "x[0] : x[1] for x in res } @classmethod def", "} @classmethod def multiset(cls, data, ttl=None): ts = int(time()) ttl", "in res.items() } return res def __ttl_filter(self): \"\"\" Add the", "self._meta.database.initialize(db_connection) def multiget(self, keys): cls = self.__class__ res = self.select(cls.key,", "= peewee.CharField(max_length=256, unique=True) value = playhouse.kv.JSONField() mtime = peewee.IntegerField(default=time) ttl", "] cls = self.__class__ peewee.DeleteQuery(cls).where(cls.key << keys).execute() def list(self, prefix=None," ]
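# Added usage sketch, not from the original record: assumes peewee 2.x,
# which is what DeleteQuery and insert_many(...).upsert() above imply; the
# table name and keys are illustrative.
if __name__ == '__main__':
    db = peewee.SqliteDatabase(':memory:')
    adapter = PeeweeAdapter(db, table_name='kv_cache')
    PeeweeAdapter.create_table(fail_silently=True)
    PeeweeAdapter.multiset({'alpha': 1, 'beta': {'nested': True}})
    print(adapter.multiget(['alpha', 'beta']))
    adapter.delete('alpha')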
from collections import namedtuple

import torch
from torch.nn import (AdaptiveAvgPool2d, BatchNorm2d, Conv2d, MaxPool2d,
                      Module, PReLU, ReLU, Sequential, Sigmoid)

# yapf: disable
"""
ArcFace implementation from [TreB1eN](https://github.com/TreB1eN/InsightFace_Pytorch)  # isort:skip  # noqa
"""
# yapf: enable


class Flatten(Module):
    """Flatten Module."""

    def forward(self, input):
        return input.view(input.size(0), -1)


def l2_norm(input, axis=1):
    """l2 normalization.

    Args:
        input (torch.Tensor): The input tensor.
        axis (int, optional): Specifies which axis of input to calculate the
            norm across. Defaults to 1.

    Returns:
        Tensor: Tensor after L2 normalization per-instance.
    """
    norm = torch.norm(input, 2, axis, True)
    output = torch.div(input, norm)
    return output


class Bottleneck(namedtuple('Block', ['in_channel', 'depth', 'stride'])):
    """A named tuple describing a ResNet block."""


def get_block(in_channel, depth, num_units, stride=2):
    """Get a single block config.

    Args:
        in_channel (int): Input channels.
        depth (int): Output channels.
        num_units (int): Number of unit modules.
        stride (int, optional): Conv2d stride. Defaults to 2.

    Returns:
        list: A list of unit modules' config.
    """
    return [Bottleneck(in_channel, depth, stride)
            ] + [Bottleneck(depth, depth, 1) for i in range(num_units - 1)]


def get_blocks(num_layers):
    """Get block configs of backbone.

    Args:
        num_layers (int): Number of ConvBlock layers in backbone.

    Raises:
        ValueError: `num_layers` must be one of [50, 100, 152].

    Returns:
        list: A list of block configs.
    """
    if num_layers == 50:
        blocks = [
            get_block(in_channel=64, depth=64, num_units=3),
            get_block(in_channel=64, depth=128, num_units=4),
            get_block(in_channel=128, depth=256, num_units=14),
            get_block(in_channel=256, depth=512, num_units=3)
        ]
    elif num_layers == 100:
        blocks = [
            get_block(in_channel=64, depth=64, num_units=3),
            get_block(in_channel=64, depth=128, num_units=13),
            get_block(in_channel=128, depth=256, num_units=30),
            get_block(in_channel=256, depth=512, num_units=3)
        ]
    elif num_layers == 152:
        blocks = [
            get_block(in_channel=64, depth=64, num_units=3),
            get_block(in_channel=64, depth=128, num_units=8),
            get_block(in_channel=128, depth=256, num_units=36),
            get_block(in_channel=256, depth=512, num_units=3)
        ]
    else:
        raise ValueError(
            'Invalid number of layers: {}. Must be one of [50, 100, 152]'.
            format(num_layers))
    return blocks


class SEModule(Module):
    """Squeeze-and-Excitation Modules.

    Args:
        channels (int): Input channels.
        reduction (int): Intermediate channels reduction ratio.
    """

    def __init__(self, channels, reduction):
        super(SEModule, self).__init__()
        self.avg_pool = AdaptiveAvgPool2d(1)
        self.fc1 = Conv2d(
            channels, channels // reduction, kernel_size=1, padding=0,
            bias=False)
        self.relu = ReLU(inplace=True)
        self.fc2 = Conv2d(
            channels // reduction, channels, kernel_size=1, padding=0,
            bias=False)
        self.sigmoid = Sigmoid()

    def forward(self, x):
        """Forward Function."""
        module_input = x
        x = self.avg_pool(x)
        x = self.fc1(x)
        x = self.relu(x)
        x = self.fc2(x)
        x = self.sigmoid(x)
        return module_input * x


class bottleneck_IR(Module):
    """Intermediate Resblock of bottleneck.

    Args:
        in_channel (int): Input channels.
        depth (int): Output channels.
        stride (int): Conv2d stride.
    """

    def __init__(self, in_channel, depth, stride):
        """Intermediate Resblock of bottleneck.

        Args:
            in_channel (int): Input channels.
            depth (int): Output channels.
            stride (int): Conv2d stride.
        """
        super(bottleneck_IR, self).__init__()
        if in_channel == depth:
            self.shortcut_layer = MaxPool2d(1, stride)
        else:
            self.shortcut_layer = Sequential(
                Conv2d(in_channel, depth, (1, 1), stride, bias=False),
                BatchNorm2d(depth))
        self.res_layer = Sequential(
            BatchNorm2d(in_channel),
            Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False),
            PReLU(depth),
            Conv2d(depth, depth, (3, 3), stride, 1, bias=False),
            BatchNorm2d(depth))

    def forward(self, x):
        """Forward function."""
        shortcut = self.shortcut_layer(x)
        res = self.res_layer(x)
        return res + shortcut


class bottleneck_IR_SE(Module):
    """Intermediate Resblock of bottleneck with SEModule.

    Args:
        in_channel (int): Input channels.
        depth (int): Output channels.
        stride (int): Conv2d stride.
    """

    def __init__(self, in_channel, depth, stride):
        super(bottleneck_IR_SE, self).__init__()
        if in_channel == depth:
            self.shortcut_layer = MaxPool2d(1, stride)
        else:
            self.shortcut_layer = Sequential(
                Conv2d(in_channel, depth, (1, 1), stride, bias=False),
                BatchNorm2d(depth))
        self.res_layer = Sequential(
            BatchNorm2d(in_channel),
            Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False),
            PReLU(depth),
            Conv2d(depth, depth, (3, 3), stride, 1, bias=False),
            BatchNorm2d(depth),
            SEModule(depth, 16))

    def forward(self, x):
        """Forward function."""
        shortcut = self.shortcut_layer(x)
        res = self.res_layer(x)
        return res + shortcut
<reponame>mahi0601/SpotifyPlaylist
import os

from spotifyclient import SpotifyClient


def main():
    spotify_client = SpotifyClient(os.getenv("SPOTIFY_AUTHORIZATION_TOKEN"),
                                   os.getenv("SPOTIFY_USER_ID"))

    # get last played tracks
    num_tracks_to_visualise = int(input("How many tracks would you like to visualise? "))
    last_played_tracks = spotify_client.get_last_played_tracks(num_tracks_to_visualise)
    print(f"\nHere are the last {num_tracks_to_visualise} tracks you listened to on Spotify:")
    for index, track in enumerate(last_played_tracks):
        print(f"{index+1}- {track}")

    # choose which tracks to use as a seed to generate a playlist
    indexes = input("\nEnter a list of up to 5 tracks you'd like to use as seeds. Use indexes separated by a space: ")
    indexes = indexes.split()
    seed_tracks = [last_played_tracks[int(index)-1] for index in indexes]

    # get recommended tracks based off seed tracks
    recommended_tracks = spotify_client.get_track_recommendations(seed_tracks)
    print("\nHere are the recommended tracks which will be included in your new playlist:")
    for index, track in enumerate(recommended_tracks):
        print(f"{index+1}- {track}")

    # get playlist name from user and create playlist
    playlist_name = input("\nWhat's the playlist name? ")
    playlist = spotify_client.create_playlist(playlist_name)
    print(f"\nPlaylist '{playlist.name}' was created successfully.")

    # populate playlist with recommended tracks
    spotify_client.populate_playlist(playlist, recommended_tracks)
    print(f"\nRecommended tracks successfully uploaded to playlist '{playlist.name}'.")


if __name__ == "__main__":
    main()
# -*- coding: utf-8 -*-
from ddtrace.compat import PY2
from ddtrace.constants import ANALYTICS_SAMPLE_RATE_KEY
from ddtrace.contrib.flask.patch import flask_version
from ddtrace.ext import http
from ddtrace.propagation.http import HTTP_HEADER_TRACE_ID, HTTP_HEADER_PARENT_ID
from flask import abort

from . import BaseFlaskTestCase
from ...utils import assert_span_http_status_code

base_exception_name = 'builtins.Exception'
if PY2:
    base_exception_name = 'exceptions.Exception'


class FlaskRequestTestCase(BaseFlaskTestCase):
    def test_request(self):
        """
        When making a request
            We create the expected spans
        """
        @self.app.route('/')
        def index():
            return 'Hello Flask', 200

        res = self.client.get('/')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res.data, b'Hello Flask')

        spans = self.get_spans()
        self.assertEqual(len(spans), 8)

        # Assert the order of the spans created
        self.assertListEqual(
            [
                'flask.request',
                'flask.try_trigger_before_first_request_functions',
                'flask.preprocess_request',
                'flask.dispatch_request',
                'tests.contrib.flask.test_request.index',
                'flask.process_response',
                'flask.do_teardown_request',
                'flask.do_teardown_appcontext',
            ],
            [s.name for s in spans],
        )

        # Assert span services
        for span in spans:
            self.assertEqual(span.service, 'flask')

        # Root request span
        req_span = spans[0]
        self.assertEqual(req_span.service, 'flask')
        self.assertEqual(req_span.name, 'flask.request')
        self.assertEqual(req_span.resource, 'GET /')
        self.assertEqual(req_span.span_type, 'web')
        self.assertEqual(req_span.error, 0)
        self.assertIsNone(req_span.parent_id)

        # Request tags
        self.assertEqual(req_span.get_tag('flask.endpoint'), 'index')
        self.assertEqual(req_span.get_tag('flask.url_rule'), '/')
        self.assertEqual(req_span.get_tag('http.method'), 'GET')
        self.assertEqual(req_span.get_tag(http.URL), 'http://localhost/')
        assert_span_http_status_code(req_span, 200)

    def test_request_query_string(self):
        """
        When making a request
            When the request contains a query string
                We create the expected spans
        """
        @self.app.route('/')
        def index():
            return 'Hello Flask', 200

        res = self.client.get('/', query_string=dict(hello='flask'))
        self.assertEqual(res.status_code, 200)

        spans = self.get_spans()

        # Root request span
        req_span = spans[0]
        self.assertEqual(req_span.service, 'flask')
        self.assertEqual(req_span.name, 'flask.request')
        # Note: contains no query string
        self.assertEqual(req_span.resource, 'GET /')
        self.assertEqual(req_span.span_type, 'web')
        self.assertEqual(req_span.error, 0)

        # Request tags
        self.assertEqual(req_span.get_tag('flask.endpoint'), 'index')
        # Note: contains no query string
        self.assertEqual(req_span.get_tag('flask.url_rule'), '/')
        self.assertEqual(req_span.get_tag('http.method'), 'GET')
        # Note: contains no query string
        self.assertEqual(req_span.get_tag(http.URL), 'http://localhost/')
        assert_span_http_status_code(req_span, 200)

    def test_request_query_string_trace(self):
        """Make sure when making a request that we create the expected spans and capture the query string."""
        @self.app.route('/')
        def index():
            return 'Hello Flask', 200

        with self.override_http_config('flask', dict(trace_query_string=True)):
            self.client.get('/?foo=bar&baz=biz')

        spans = self.get_spans()

        # Request tags
        assert spans[0].get_tag(http.QUERY_STRING) == 'foo=bar&baz=biz'

    def test_analytics_global_on_integration_default(self):
        """
        When making a request
            When an event sample rate is not set and globally trace search is enabled
                We expect the root span to have the appropriate tag
        """
        @self.app.route('/')
        def index():
            return 'Hello Flask', 200

        with self.override_global_config(dict(analytics_enabled=True)):
            res = self.client.get('/')
            self.assertEqual(res.status_code, 200)
            self.assertEqual(res.data, b'Hello Flask')

        root = self.get_root_span()
        root.assert_matches(
            name='flask.request',
            metrics={
                ANALYTICS_SAMPLE_RATE_KEY: 1.0,
            },
        )

        for span in self.spans:
            if span == root:
                continue
            self.assertIsNone(span.get_metric(ANALYTICS_SAMPLE_RATE_KEY))

    def test_analytics_global_off_integration_on(self):
        """
        When making a request
            When an event sample rate is set and globally trace search is disabled
                We expect the root span to have the appropriate tag
        """
        @self.app.route('/')
        def index():
            return 'Hello Flask', 200

        with self.override_global_config(dict(analytics_enabled=False)):
            with self.override_config('flask', dict(analytics_enabled=True, analytics_sample_rate=0.5)):
                res = self.client.get('/')
                self.assertEqual(res.status_code, 200)
                self.assertEqual(res.data, b'Hello Flask')

        root = self.get_root_span()
        root.assert_matches(
            name='flask.request',
            metrics={
                ANALYTICS_SAMPLE_RATE_KEY: 0.5,
            },
        )

        for span in self.spans:
            if span == root:
                continue
            self.assertIsNone(span.get_metric(ANALYTICS_SAMPLE_RATE_KEY))

    def test_request_distributed_tracing(self):
        """
        When making a request
            When distributed tracing headers are present
                We create the expected spans
        """
        @self.app.route('/')
        def index():
            return 'Hello Flask', 200

        # Default: distributed tracing enabled
        res = self.client.get('/', headers={
            HTTP_HEADER_PARENT_ID: '12345',
            HTTP_HEADER_TRACE_ID: '678910',
        })
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res.data, b'Hello Flask')

        # Assert the trace and parent ids propagated into the root span
        span = self.find_span_by_name(self.get_spans(), 'flask.request')
        self.assertEqual(span.trace_id, 678910)
        self.assertEqual(span.parent_id, 12345)

    def test_request_unicode(self):
        """
        When making a request
            When the url contains unicode
                We create the expected spans
        """
        @self.app.route(u'/üŋïĉóđē')
        def unicode():
            return 'üŋïĉóđē', 200

        res = self.client.get(u'/üŋïĉóđē')
        self.assertEqual(res.status_code, 200)

        spans = self.get_spans()

        # Root request span
        req_span = spans[0]
        self.assertEqual(req_span.service, 'flask')
        self.assertEqual(req_span.name, 'flask.request')
        self.assertEqual(req_span.resource, u'GET /üŋïĉóđē')
        self.assertEqual(req_span.span_type, 'web')
        self.assertEqual(req_span.error, 0)
        self.assertIsNone(req_span.parent_id)

        # Request tags
        self.assertEqual(req_span.get_tag('flask.endpoint'), 'unicode')
        self.assertEqual(req_span.get_tag('flask.url_rule'), u'/üŋïĉóđē')
        self.assertEqual(req_span.get_tag('http.method'), 'GET')
        self.assertEqual(req_span.get_tag(http.URL), u'http://localhost/üŋïĉóđē')
        assert_span_http_status_code(req_span, 200)

    def test_request_404(self):
        """
        When making a request
            When the requested endpoint was not found
                We create the expected spans
        """
        res = self.client.get('/not-found')
        self.assertEqual(res.status_code, 404)

    def test_request_abort_404(self):
        """
        When making a request
            When the requested endpoint calls `abort(404)`
                We create the expected spans
        """
        @self.app.route('/not-found')
        def not_found():
            abort(404)

        res = self.client.get('/not-found')
        self.assertEqual(res.status_code, 404)

        spans = self.get_spans()
        self.assertEqual(len(spans), 10)

        # Assert the order of the spans created
        self.assertListEqual(
            [
                'flask.request',
                'flask.try_trigger_before_first_request_functions',
                'flask.preprocess_request',
                'flask.dispatch_request',
                'tests.contrib.flask.test_request.not_found',
                'flask.handle_user_exception',
                'flask.handle_http_exception',
                'flask.process_response',
                'flask.do_teardown_request',
                'flask.do_teardown_appcontext',
            ],
            [s.name for s in spans],
        )

        # Root request span
        req_span = spans[0]
        self.assertEqual(req_span.get_tag('flask.endpoint'), 'not_found')
        self.assertEqual(req_span.get_tag('flask.url_rule'), '/not-found')
        assert_span_http_status_code(req_span, 404)

        # Handler span
        handler_span = spans[4]
        self.assertEqual(handler_span.service, 'flask')
        self.assertEqual(handler_span.name, 'tests.contrib.flask.test_request.not_found')
        self.assertEqual(handler_span.resource, '/not-found')
        self.assertEqual(handler_span.error, 1)
        self.assertTrue(handler_span.get_tag('error.msg').startswith('404 Not Found'))
        self.assertTrue(handler_span.get_tag('error.stack').startswith('Traceback'))
        self.assertEqual(handler_span.get_tag('error.type'), 'werkzeug.exceptions.NotFound')

    def test_request_500(self):
        """
        When making a request
            When the requested endpoint raises an exception
                We create the expected spans
        """
        @self.app.route('/500')
        def fivehundred():
            raise Exception('500 error')

        res = self.client.get('/500')
        self.assertEqual(res.status_code, 500)

        spans = self.get_spans()
        self.assertEqual(len(spans), 9)

        # Assert the order of the spans created
        self.assertListEqual(
            [
                'flask.request',
                'flask.try_trigger_before_first_request_functions',
                'flask.preprocess_request',
                'flask.dispatch_request',
                'tests.contrib.flask.test_request.fivehundred',
                'flask.handle_user_exception',
                'flask.handle_exception',
                'flask.do_teardown_request',
                'flask.do_teardown_appcontext',
            ],
            [s.name for s in spans],
        )

        # Root request span
        req_span = spans[0]
        self.assertEqual(req_span.error, 1)

        # Request tags
        self.assertEqual(req_span.get_tag('http.method'), 'GET')
        self.assertEqual(req_span.get_tag(http.URL), 'http://localhost/500')
        assert_span_http_status_code(req_span, 500)
        self.assertEqual(req_span.get_tag('flask.endpoint'), 'fivehundred')
        self.assertEqual(req_span.get_tag('flask.url_rule'), '/500')

        # Dispatch span
        dispatch_span = spans[3]
        self.assertEqual(dispatch_span.service, 'flask')
        self.assertEqual(dispatch_span.name, 'flask.dispatch_request')
        self.assertEqual(dispatch_span.resource, 'flask.dispatch_request')
        self.assertEqual(dispatch_span.error, 1)
        self.assertTrue(dispatch_span.get_tag('error.msg').startswith('500 error'))
        self.assertTrue(dispatch_span.get_tag('error.stack').startswith('Traceback'))
        self.assertEqual(dispatch_span.get_tag('error.type'), base_exception_name)

        # Handler span
        handler_span = spans[4]
        self.assertEqual(handler_span.service, 'flask')
        self.assertEqual(handler_span.name, 'tests.contrib.flask.test_request.fivehundred')
        self.assertEqual(handler_span.resource, '/500')
        self.assertEqual(handler_span.error, 1)
        self.assertTrue(handler_span.get_tag('error.msg').startswith('500 error'))
        self.assertTrue(handler_span.get_tag('error.stack').startswith('Traceback'))

    def test_request_501(self):
        """
        When making a request
            When the requested endpoint calls `abort(501)`
                We create the expected spans
        """
        @self.app.route('/501')
        def fivehundredone():
            abort(501)

        res = self.client.get('/501')
        self.assertEqual(res.status_code, 501)

        spans = self.get_spans()

        # Root request span
        req_span = spans[0]
        self.assertEqual(req_span.error, 1)
        assert_span_http_status_code(req_span, 501)
        self.assertEqual(req_span.get_tag('flask.endpoint'), 'fivehundredone')
        self.assertEqual(req_span.get_tag('flask.url_rule'), '/501')

        # Dispatch span
        dispatch_span = spans[3]
        self.assertEqual(dispatch_span.service, 'flask')
        self.assertEqual(dispatch_span.name, 'flask.dispatch_request')
        self.assertEqual(dispatch_span.resource, 'flask.dispatch_request')
        self.assertEqual(dispatch_span.error, 1)
        self.assertTrue(dispatch_span.get_tag('error.msg').startswith('501 Not Implemented'))
        self.assertTrue(dispatch_span.get_tag('error.stack').startswith('Traceback'))

        # Handler span
        handler_span = spans[4]
        self.assertEqual(handler_span.service, 'flask')
        self.assertEqual(handler_span.name, 'tests.contrib.flask.test_request.fivehundredone')
        self.assertEqual(handler_span.resource, '/501')
        self.assertEqual(handler_span.error, 1)

    def test_request_error_handler(self):
        """
        When making a request
            When the requested endpoint raises an exception
                We create the expected spans
        """
        @self.app.errorhandler(500)
        def error_handler(e):
            return 'Whoops', 500

        @self.app.route('/500')
        def fivehundred():
            raise Exception('500 error')

        res = self.client.get('/500')
        self.assertEqual(res.status_code, 500)
        self.assertEqual(res.data, b'Whoops')

        spans = self.get_spans()
        if flask_version >= (0, 12, 0):
            self.assertEqual(len(spans), 11)

            # Assert the order of the spans created
            self.assertListEqual(
                [
                    'flask.request',
                    'flask.try_trigger_before_first_request_functions',
                    'flask.preprocess_request',
                    'flask.dispatch_request',
                    'tests.contrib.flask.test_request.fivehundred',
                    'flask.handle_user_exception',
                    'flask.handle_exception',
                    'tests.contrib.flask.test_request.error_handler',
                    'flask.process_response',
                    'flask.do_teardown_request',
                    'flask.do_teardown_appcontext',
                ],
                [s.name for s in spans],
            )
        else:
            self.assertEqual(len(spans), 10)

            # Assert the order of the spans created
            self.assertListEqual(
                [
                    'flask.request',
                    'flask.try_trigger_before_first_request_functions',
                    'flask.preprocess_request',
                    'flask.dispatch_request',
                    'tests.contrib.flask.test_request.fivehundred',
                    'flask.handle_exception',
                    'tests.contrib.flask.test_request.error_handler',
                    'flask.process_response',
                    'flask.do_teardown_request',
                    'flask.do_teardown_appcontext',
                ],
                [s.name for s in spans],
            )
import BaseFlaskTestCase", "self.assertEqual(span.trace_id, 678910) self.assertEqual(span.parent_id, 12345) # Explicitly enable distributed tracing with", "'werkzeug.exceptions.NotImplemented') # User exception span user_ex_span = spans[5] self.assertEqual(user_ex_span.service, 'flask')", "self.assertEqual(handler_span.resource, '/500') self.assertEqual(handler_span.error, 1) self.assertTrue(handler_span.get_tag('error.msg').startswith('500 error')) self.assertTrue(handler_span.get_tag('error.stack').startswith('Traceback')) self.assertEqual(handler_span.get_tag('error.type'), base_exception_name) #", "'GET') self.assertEqual(req_span.get_tag(http.URL), 'http://localhost/not-found') assert_span_http_status_code(req_span, 404) # Dispatch span dispatch_span =", "res = self.client.get('/501') self.assertEqual(res.status_code, 501) spans = self.get_spans() self.assertEqual(len(spans), 10)", "self.assertEqual(res.data, b'Hello Flask') root = self.get_root_span() root.assert_matches( name='flask.request', metrics={ ANALYTICS_SAMPLE_RATE_KEY:", "in self.spans: if span == root: continue self.assertIsNone(span.get_metric(ANALYTICS_SAMPLE_RATE_KEY)) def test_analytics_global_off_integration_default(self):", "@self.app.route('/') def index(): return 'Hello Flask', 200 with self.override_global_config(dict(analytics_enabled=False)): with", "200) self.assertEqual(res.data, b'\\xc3\\xbc\\xc5\\x8b\\xc3\\xaf\\xc4\\x89\\xc3\\xb3\\xc4\\x91\\xc4\\x93') spans = self.get_spans() self.assertEqual(len(spans), 8) # Assert", "'flask.request') self.assertEqual(span.trace_id, 678910) self.assertEqual(span.parent_id, 12345) # Explicitly enable distributed tracing", "self.assertEqual(handler_span.name, 'tests.contrib.flask.test_request.index') self.assertEqual(handler_span.resource, '/') self.assertEqual(req_span.error, 0) def test_request_query_string_trace(self): \"\"\"Make sure", "self.assertEqual(req_span.get_tag('flask.url_rule'), '/not-found') # Dispatch span dispatch_span = spans[3] self.assertEqual(dispatch_span.service, 'flask')", "= spans[5] self.assertEqual(user_ex_span.service, 'flask') self.assertEqual(user_ex_span.name, 'flask.handle_user_exception') self.assertEqual(user_ex_span.resource, 'flask.handle_user_exception') self.assertEqual(user_ex_span.error, 1)", "ddtrace.constants import ANALYTICS_SAMPLE_RATE_KEY from ddtrace.contrib.flask.patch import flask_version from ddtrace.ext import", "res = self.client.get('/500') self.assertEqual(res.status_code, 500) spans = self.get_spans() self.assertEqual(len(spans), 9)", "self.assertEqual(dispatch_span.name, 'flask.dispatch_request') self.assertEqual(dispatch_span.resource, 'flask.dispatch_request') self.assertEqual(dispatch_span.error, 1) self.assertTrue(dispatch_span.get_tag('error.msg').startswith('500 error')) self.assertTrue(dispatch_span.get_tag('error.stack').startswith('Traceback')) self.assertEqual(dispatch_span.get_tag('error.type'),", "self.assertEqual(req_span.span_type, 'web') self.assertEqual(req_span.error, 1) self.assertIsNone(req_span.parent_id) # Request tags self.assertEqual(req_span.get_tag('http.method'), 'GET')", "When making a request When distributed tracing headers are present", "= spans[4] self.assertEqual(handler_span.service, 'flask') self.assertEqual(handler_span.name, 'tests.contrib.flask.test_request.unicode') self.assertEqual(handler_span.resource, u'/üŋïĉóđē') self.assertEqual(req_span.error, 0)", "'flask.do_teardown_appcontext', ], [s.name for s in spans], ) else: self.assertEqual(len(spans),", "0) def 
test_request_query_string_trace(self): \"\"\"Make sure when making a request that", "def test_request_error_handler(self): \"\"\" When making a request When the requested", "12345) # Explicitly enable distributed tracing with self.override_config('flask', dict(distributed_tracing_enabled=True)): res", "self.client.get('/') self.assertEqual(res.status_code, 200) self.assertEqual(res.data, b'Hello Flask') root = self.get_root_span() root.assert_matches(", "spans \"\"\" @self.app.route('/') def index(): return 'Hello Flask', 200 res", "Handler span handler_span = spans[4] self.assertEqual(handler_span.service, 'flask') self.assertEqual(handler_span.name, 'tests.contrib.flask.test_request.index') #", "is disabled We expect the root span to have the", "root.assert_matches( name='flask.request', metrics={ ANALYTICS_SAMPLE_RATE_KEY: 0.5, }, ) for span in", "= self.get_spans() if flask_version >= (0, 12, 0): self.assertEqual(len(spans), 11)", "root span span = self.find_span_by_name(self.get_spans(), 'flask.request') self.assertEqual(span.trace_id, 678910) self.assertEqual(span.parent_id, 12345)", "contains no query string self.assertEqual(req_span.get_tag(http.URL), 'http://localhost/') assert_span_http_status_code(req_span, 200) # Handler", "'12345', HTTP_HEADER_TRACE_ID: '678910', }) self.assertEqual(res.status_code, 200) self.assertEqual(res.data, b'Hello Flask') #", "fivehundred(): raise Exception('500 error') res = self.client.get('/500') self.assertEqual(res.status_code, 500) spans", "== root: continue self.assertIsNone(span.get_metric(ANALYTICS_SAMPLE_RATE_KEY)) def test_distributed_tracing(self): \"\"\" When making a", "spans \"\"\" res = self.client.get('/not-found') self.assertEqual(res.status_code, 404) spans = self.get_spans()", "and sample rate is set and globally trace search is", "Flask') root = self.get_root_span() root.assert_matches( name='flask.request', metrics={ ANALYTICS_SAMPLE_RATE_KEY: 1.0, },", "set and sample rate is set and globally trace search", "self.assertEqual(req_span.get_tag(http.URL), 'http://localhost/') assert_span_http_status_code(req_span, 200) assert http.QUERY_STRING not in req_span.meta #", "self.assertEqual(handler_span.resource, u'/üŋïĉóđē') self.assertEqual(req_span.error, 0) def test_request_404(self): \"\"\" When making a", "Note: contains no query string self.assertEqual(req_span.get_tag(http.URL), 'http://localhost/') assert_span_http_status_code(req_span, 200) #", "Request tags assert spans[0].get_tag(http.QUERY_STRING) == 'foo=bar&baz=biz' def test_analytics_global_on_integration_default(self): \"\"\" When", "@self.app.route('/') def index(): return 'Hello Flask', 200 # Default: distributed", "We create the expected spans \"\"\" @self.app.route('/not-found') def not_found(): abort(404)", "[s.name for s in spans], ) # Assert span services", "self.assertEqual(dispatch_span.resource, 'flask.dispatch_request') self.assertEqual(dispatch_span.error, 1) self.assertTrue(dispatch_span.get_tag('error.msg').startswith('500 error')) self.assertTrue(dispatch_span.get_tag('error.stack').startswith('Traceback')) self.assertEqual(dispatch_span.get_tag('error.type'), base_exception_name) #", "an integration trace search is enabled and sample rate is", "\"\"\" @self.app.route('/500') def fivehundred(): raise Exception('500 error') res = self.client.get('/500')", "self.assertEqual(user_ex_span.error, 0) def test_request_error_handler(self): \"\"\" When making a request When", "Flask') spans = self.get_spans() self.assertEqual(len(spans), 8) # Assert the order", 
"self.assertEqual(req_span.error, 0) def test_request_query_string_trace(self): \"\"\"Make sure when making a request", "appropriate tag \"\"\" @self.app.route('/') def index(): return 'Hello Flask', 200", "spans[4] self.assertEqual(handler_span.service, 'flask') self.assertEqual(handler_span.name, 'tests.contrib.flask.test_request.unicode') self.assertEqual(handler_span.resource, u'/üŋïĉóđē') self.assertEqual(req_span.error, 0) def", "\"\"\" When making a request When the request contains a", "404) # Dispatch span dispatch_span = spans[3] self.assertEqual(dispatch_span.service, 'flask') self.assertEqual(dispatch_span.name,", "404) self.assertEqual(req_span.get_tag('flask.endpoint'), 'not_found') self.assertEqual(req_span.get_tag('flask.url_rule'), '/not-found') # Dispatch span dispatch_span =", "'flask.request', 'flask.try_trigger_before_first_request_functions', 'flask.preprocess_request', 'flask.dispatch_request', 'tests.contrib.flask.test_request.fivehundred', 'flask.handle_user_exception', 'flask.handle_exception', 'flask.do_teardown_request', 'flask.do_teardown_appcontext', ],", "create the expected spans \"\"\" @self.app.errorhandler(500) def error_handler(e): return 'Whoops',", "self.assertEqual(user_ex_span.get_tag('error.type'), base_exception_name) def test_request_501(self): \"\"\" When making a request When", "return 'Whoops', 500 @self.app.route('/500') def fivehundred(): raise Exception('500 error') res", "\"\"\" When making a request When the requested endpoint calls", "continue self.assertIsNone(span.get_metric(ANALYTICS_SAMPLE_RATE_KEY)) def test_analytics_global_on_integration_on(self): \"\"\" When making a request When", "trace search is disabled We expect the root span to", "return 'Hello Flask', 200 with self.override_http_config('flask', dict(trace_query_string=True)): self.client.get('/?foo=bar&baz=biz') spans =", "in self.spans: if span == root: continue self.assertIsNone(span.get_metric(ANALYTICS_SAMPLE_RATE_KEY)) def test_distributed_tracing(self):", "'/') self.assertEqual(req_span.get_tag('http.method'), 'GET') # Note: contains no query string self.assertEqual(req_span.get_tag(http.URL),", "self.assertEqual(dispatch_span.name, 'flask.dispatch_request') self.assertEqual(dispatch_span.resource, 'flask.dispatch_request') self.assertEqual(dispatch_span.error, 1) self.assertTrue(dispatch_span.get_tag('error.msg').startswith('404 Not Found')) self.assertTrue(dispatch_span.get_tag('error.stack').startswith('Traceback'))", "calls `abort(404)` We create the expected spans \"\"\" @self.app.route('/not-found') def", "# Request tags self.assertEqual(req_span.get_tag('http.method'), 'GET') self.assertEqual(req_span.get_tag(http.URL), 'http://localhost/500') assert_span_http_status_code(req_span, 500) self.assertEqual(req_span.get_tag('flask.endpoint'),", "self.assertEqual(dispatch_span.error, 1) self.assertTrue(dispatch_span.get_tag('error.msg').startswith('404 Not Found')) self.assertTrue(dispatch_span.get_tag('error.stack').startswith('Traceback')) self.assertEqual(dispatch_span.get_tag('error.type'), 'werkzeug.exceptions.NotFound') def test_request_abort_404(self):", "error') res = self.client.get('/500') self.assertEqual(res.status_code, 500) spans = self.get_spans() self.assertEqual(len(spans),", "spans[4] self.assertEqual(handler_span.service, 'flask') self.assertEqual(handler_span.name, 'tests.contrib.flask.test_request.fivehundredone') self.assertEqual(handler_span.resource, '/501') self.assertEqual(handler_span.error, 1) 
self.assertTrue(handler_span.get_tag('error.msg').startswith('501", "span == root: continue self.assertIsNone(span.get_metric(ANALYTICS_SAMPLE_RATE_KEY)) def test_distributed_tracing(self): \"\"\" When making", "query string We create the expected spans \"\"\" @self.app.route('/') def", "res = self.client.get('/', query_string=dict(hello='flask')) self.assertEqual(res.status_code, 200) self.assertEqual(res.data, b'Hello Flask') spans", "'tests.contrib.flask.test_request.not_found') self.assertEqual(handler_span.resource, '/not-found') self.assertEqual(handler_span.error, 1) self.assertTrue(handler_span.get_tag('error.msg').startswith('404 Not Found')) self.assertTrue(handler_span.get_tag('error.stack').startswith('Traceback')) self.assertEqual(handler_span.get_tag('error.type'),", "200 # Default: distributed tracing enabled res = self.client.get('/', headers={", "spans[4] self.assertEqual(handler_span.service, 'flask') self.assertEqual(handler_span.name, 'tests.contrib.flask.test_request.fivehundred') self.assertEqual(handler_span.resource, '/500') self.assertEqual(handler_span.error, 1) self.assertTrue(handler_span.get_tag('error.msg').startswith('500", "self.override_config('flask', dict(distributed_tracing_enabled=True)): res = self.client.get('/', headers={ HTTP_HEADER_PARENT_ID: '12345', HTTP_HEADER_TRACE_ID: '678910',", "def test_request_500(self): \"\"\" When making a request When the requested", "'/') self.assertEqual(req_span.error, 0) def test_request_query_string_trace(self): \"\"\"Make sure when making a", "in spans], ) else: self.assertEqual(len(spans), 10) # Assert the order", "no query string self.assertEqual(req_span.get_tag('flask.url_rule'), '/') self.assertEqual(req_span.get_tag('http.method'), 'GET') # Note: contains", "handler_span = spans[4] self.assertEqual(handler_span.service, 'flask') self.assertEqual(handler_span.name, 'tests.contrib.flask.test_request.index') self.assertEqual(handler_span.resource, '/') self.assertEqual(req_span.error,", "self.assertEqual(handler_span.name, 'tests.contrib.flask.test_request.not_found') self.assertEqual(handler_span.resource, '/not-found') self.assertEqual(handler_span.error, 1) self.assertTrue(handler_span.get_tag('error.msg').startswith('404 Not Found')) self.assertTrue(handler_span.get_tag('error.stack').startswith('Traceback'))", "spans[0] self.assertEqual(req_span.service, 'flask') self.assertEqual(req_span.name, 'flask.request') self.assertEqual(req_span.resource, 'GET /501') self.assertEqual(req_span.span_type, 'web')", "def test_request_501(self): \"\"\" When making a request When the requested", "'flask.handle_user_exception') self.assertEqual(user_ex_span.resource, 'flask.handle_user_exception') self.assertEqual(user_ex_span.error, 0) def test_request_error_handler(self): \"\"\" When making", "spans], ) # Assert span services for span in spans:", "'flask.request') self.assertEqual(req_span.resource, 'GET /') self.assertEqual(req_span.span_type, 'web') self.assertEqual(req_span.error, 0) self.assertIsNone(req_span.parent_id) #", "index(): return 'Hello Flask', 200 res = self.client.get('/', query_string=dict(hello='flask')) self.assertEqual(res.status_code,", "spans: self.assertEqual(span.service, 'flask') # Root request span req_span = spans[0]", "trace search is not set and sample rate is set", "'flask.preprocess_request', 'flask.dispatch_request', 'tests.contrib.flask.test_request.fivehundred', 'flask.handle_user_exception', 'flask.handle_exception', 'tests.contrib.flask.test_request.error_handler', 
'flask.do_teardown_request', 'flask.do_teardown_appcontext', ], [s.name", "self.assertEqual(req_span.span_type, 'web') self.assertEqual(req_span.error, 0) self.assertIsNone(req_span.parent_id) # Request tags self.assertEqual(req_span.get_tag('flask.endpoint'), 'index')", "Assert parent and trace id are properly set on the", "string self.assertEqual(req_span.get_tag(http.URL), 'http://localhost/') assert_span_http_status_code(req_span, 200) # Handler span handler_span =", "'tests.contrib.flask.test_request.unicode') self.assertEqual(handler_span.resource, u'/üŋïĉóđē') self.assertEqual(req_span.error, 0) def test_request_404(self): \"\"\" When making", "1.0, }, ) for span in self.spans: if span ==", "res = self.client.get('/not-found') self.assertEqual(res.status_code, 404) spans = self.get_spans() self.assertEqual(len(spans), 9)", "self.assertTrue(dispatch_span.get_tag('error.stack').startswith('Traceback')) self.assertEqual(dispatch_span.get_tag('error.type'), 'werkzeug.exceptions.NotImplemented') # Handler span handler_span = spans[4] self.assertEqual(handler_span.service,", "self.override_global_config(dict(analytics_enabled=False)): with self.override_config('flask', dict(analytics_enabled=True, analytics_sample_rate=0.5)): res = self.client.get('/') self.assertEqual(res.status_code, 200)", "ddtrace.ext import http from ddtrace.propagation.http import HTTP_HEADER_TRACE_ID, HTTP_HEADER_PARENT_ID from flask", "requested endpoint calls `abort(404)` We create the expected spans \"\"\"", "contains unicode We create the expected spans \"\"\" @self.app.route(u'/üŋïĉóđē') def", "from ...utils import assert_span_http_status_code base_exception_name = 'builtins.Exception' if PY2: base_exception_name", "'flask.handle_user_exception', 'flask.handle_http_exception', 'flask.process_response', 'flask.do_teardown_request', 'flask.do_teardown_appcontext', ], [s.name for s in", "if span == root: continue self.assertIsNone(span.get_metric(ANALYTICS_SAMPLE_RATE_KEY)) def test_distributed_tracing(self): \"\"\" When", "return 'Hello Flask', 200 # Default: distributed tracing enabled res", "making a request We create the expected spans \"\"\" @self.app.route('/')", "span req_span = spans[0] self.assertEqual(req_span.service, 'flask') self.assertEqual(req_span.name, 'flask.request') self.assertEqual(req_span.resource, 'GET", "'GET') # Note: contains no query string self.assertEqual(req_span.get_tag(http.URL), 'http://localhost/') assert_span_http_status_code(req_span,", "res = self.client.get('/') self.assertEqual(res.status_code, 200) self.assertEqual(res.data, b'Hello Flask') spans =", "return 'Hello Flask', 200 res = self.client.get('/') self.assertEqual(res.status_code, 200) self.assertEqual(res.data,", "\"\"\" res = self.client.get('/not-found') self.assertEqual(res.status_code, 404) spans = self.get_spans() self.assertEqual(len(spans),", "Implemented')) self.assertTrue(dispatch_span.get_tag('error.stack').startswith('Traceback')) self.assertEqual(dispatch_span.get_tag('error.type'), 'werkzeug.exceptions.NotImplemented') # Handler span handler_span = spans[4]", "self.assertEqual(req_span.get_tag('http.method'), 'GET') self.assertEqual(req_span.get_tag(http.URL), 'http://localhost/not-found') assert_span_http_status_code(req_span, 404) self.assertEqual(req_span.get_tag('flask.endpoint'), 'not_found') self.assertEqual(req_span.get_tag('flask.url_rule'), '/not-found')", "self.assertTrue(dispatch_span.get_tag('error.stack').startswith('Traceback')) self.assertEqual(dispatch_span.get_tag('error.type'), 
base_exception_name) # Handler span handler_span = spans[4] self.assertEqual(handler_span.service,", "making a request When an integration trace search is enabled", "Flask', 200 with self.override_global_config(dict(analytics_enabled=True)): with self.override_config('flask', dict(analytics_enabled=True, analytics_sample_rate=0.5)): res =", "on the root span span = self.find_span_by_name(self.get_spans(), 'flask.request') self.assertEqual(span.trace_id, 678910)", "the expected spans \"\"\" @self.app.route('/not-found') def not_found(): abort(404) res =", "When an integration trace search is enabled and sample rate", "name='flask.request', metrics={ ANALYTICS_SAMPLE_RATE_KEY: 1.0, }, ) for span in self.spans:", "is set and globally trace search is enabled We expect", "distributed tracing with self.override_config('flask', dict(distributed_tracing_enabled=True)): res = self.client.get('/', headers={ HTTP_HEADER_PARENT_ID:", "req_span = spans[0] self.assertEqual(req_span.service, 'flask') self.assertEqual(req_span.name, 'flask.request') self.assertEqual(req_span.resource, 'GET /500')", "1) self.assertTrue(dispatch_span.get_tag('error.msg').startswith('501 Not Implemented')) self.assertTrue(dispatch_span.get_tag('error.stack').startswith('Traceback')) self.assertEqual(dispatch_span.get_tag('error.type'), 'werkzeug.exceptions.NotImplemented') # Handler span", "unicode We create the expected spans \"\"\" @self.app.route(u'/üŋïĉóđē') def unicode():", "of the spans created self.assertListEqual( [ 'flask.request', 'flask.try_trigger_before_first_request_functions', 'flask.preprocess_request', 'flask.dispatch_request',", "span to have the appropriate tag \"\"\" @self.app.route('/') def index():", "= spans[0] self.assertEqual(req_span.service, 'flask') self.assertEqual(req_span.name, 'flask.request') self.assertEqual(req_span.resource, 'GET /not-found') self.assertEqual(req_span.span_type,", "self.client.get('/500') self.assertEqual(res.status_code, 500) spans = self.get_spans() self.assertEqual(len(spans), 9) # Assert", "/500') self.assertEqual(req_span.span_type, 'web') self.assertEqual(req_span.error, 1) self.assertIsNone(req_span.parent_id) # Request tags self.assertEqual(req_span.get_tag('http.method'),", "an integration trace search is not set and sample rate", "with self.override_global_config(dict(analytics_enabled=False)): res = self.client.get('/') self.assertEqual(res.status_code, 200) self.assertEqual(res.data, b'Hello Flask')", "Note: contains no query string self.assertEqual(req_span.resource, 'GET /') self.assertEqual(req_span.span_type, 'web')", "def test_request_abort_404(self): \"\"\" When making a request When the requested", "root.assert_matches( name='flask.request', metrics={ ANALYTICS_SAMPLE_RATE_KEY: 1.0, }, ) for span in", "a request When the url contains unicode We create the", "trace search is enabled We expect the root span to", "in spans], ) # Assert span services for span in", "rate is set and globally trace search is enabled We", "\"\"\" @self.app.route('/501') def fivehundredone(): abort(501) res = self.client.get('/501') self.assertEqual(res.status_code, 501)", "import PY2 from ddtrace.constants import ANALYTICS_SAMPLE_RATE_KEY from ddtrace.contrib.flask.patch import flask_version", "We create the expected spans \"\"\" @self.app.route('/') def index(): return", "span to not include tag \"\"\" @self.app.route('/') def index(): return", "analytics_sample_rate=0.5)): res = self.client.get('/') self.assertEqual(res.status_code, 200) self.assertEqual(res.data, b'Hello 
Flask') root", "search is not set and sample rate is set and", "is set and globally trace search is disabled We expect", "= spans[3] self.assertEqual(dispatch_span.service, 'flask') self.assertEqual(dispatch_span.name, 'flask.dispatch_request') self.assertEqual(dispatch_span.resource, 'flask.dispatch_request') self.assertEqual(dispatch_span.error, 1)", "globally trace search is enabled We expect the root span", "request When an integration trace search is not event sample", "self.assertEqual(req_span.service, 'flask') self.assertEqual(req_span.name, 'flask.request') # Note: contains no query string", "query string self.assertEqual(req_span.get_tag('flask.url_rule'), '/') self.assertEqual(req_span.get_tag('http.method'), 'GET') # Note: contains no", "self.assertEqual(dispatch_span.get_tag('error.type'), base_exception_name) # Handler span handler_span = spans[4] self.assertEqual(handler_span.service, 'flask')", "contains no query string self.assertEqual(req_span.resource, 'GET /') self.assertEqual(req_span.span_type, 'web') self.assertEqual(req_span.error,", "1) self.assertTrue(dispatch_span.get_tag('error.msg').startswith('404 Not Found')) self.assertTrue(dispatch_span.get_tag('error.stack').startswith('Traceback')) self.assertEqual(dispatch_span.get_tag('error.type'), 'werkzeug.exceptions.NotFound') # Handler span", "span == root: continue self.assertIsNone(span.get_metric(ANALYTICS_SAMPLE_RATE_KEY)) def test_analytics_global_on_integration_on(self): \"\"\" When making", "u'http://localhost/üŋïĉóđē') assert_span_http_status_code(req_span, 200) # Handler span handler_span = spans[4] self.assertEqual(handler_span.service,", "'tests.contrib.flask.test_request.fivehundred', 'flask.handle_user_exception', 'flask.handle_exception', 'flask.do_teardown_request', 'flask.do_teardown_appcontext', ], [s.name for s in", "span handler_span = spans[4] self.assertEqual(handler_span.service, 'flask') self.assertEqual(handler_span.name, 'tests.contrib.flask.test_request.unicode') self.assertEqual(handler_span.resource, u'/üŋïĉóđē')", "/') self.assertEqual(req_span.span_type, 'web') self.assertEqual(req_span.error, 0) self.assertIsNone(req_span.parent_id) # Request tags self.assertEqual(req_span.get_tag('flask.endpoint'),", "Flask', 200 # Default: distributed tracing enabled res = self.client.get('/',", "tags self.assertEqual(req_span.get_tag('flask.endpoint'), 'index') self.assertEqual(req_span.get_tag('flask.url_rule'), '/') self.assertEqual(req_span.get_tag('http.method'), 'GET') self.assertEqual(req_span.get_tag(http.URL), 'http://localhost/') assert_span_http_status_code(req_span,", "metrics={ ANALYTICS_SAMPLE_RATE_KEY: 0.5, }, ) for span in self.spans: if", "# Handler span handler_span = spans[4] self.assertEqual(handler_span.service, 'flask') self.assertEqual(handler_span.name, 'tests.contrib.flask.test_request.unicode')", "self.override_config('flask', dict(distributed_tracing_enabled=False)): res = self.client.get('/', headers={ HTTP_HEADER_PARENT_ID: '12345', HTTP_HEADER_TRACE_ID: '678910',", "'flask.dispatch_request', 'tests.contrib.flask.test_request.fivehundred', 'flask.handle_user_exception', 'flask.handle_exception', 'tests.contrib.flask.test_request.error_handler', 'flask.do_teardown_request', 'flask.do_teardown_appcontext', ], [s.name for", "import http from ddtrace.propagation.http import HTTP_HEADER_TRACE_ID, HTTP_HEADER_PARENT_ID from flask import", "'/') self.assertEqual(req_span.get_tag('http.method'), 'GET') self.assertEqual(req_span.get_tag(http.URL), 'http://localhost/') 
assert_span_http_status_code(req_span, 200) assert http.QUERY_STRING not", "self.find_span_by_name(self.get_spans(), 'flask.request') self.assertEqual(span.trace_id, 678910) self.assertEqual(span.parent_id, 12345) # Explicitly enable distributed", "spans[5] self.assertEqual(user_ex_span.service, 'flask') self.assertEqual(user_ex_span.name, 'flask.handle_user_exception') self.assertEqual(user_ex_span.resource, 'flask.handle_user_exception') self.assertEqual(user_ex_span.error, 1) self.assertTrue(user_ex_span.get_tag('error.msg').startswith('500", "Implemented')) self.assertTrue(handler_span.get_tag('error.stack').startswith('Traceback')) self.assertEqual(handler_span.get_tag('error.type'), 'werkzeug.exceptions.NotImplemented') # User exception span user_ex_span =", "404) spans = self.get_spans() self.assertEqual(len(spans), 10) # Assert the order", "@self.app.route(u'/üŋïĉóđē') def unicode(): return 'üŋïĉóđē', 200 res = self.client.get(u'/üŋïĉóđē') self.assertEqual(res.status_code,", "'flask.dispatch_request') self.assertEqual(dispatch_span.resource, 'flask.dispatch_request') self.assertEqual(dispatch_span.error, 1) self.assertTrue(dispatch_span.get_tag('error.msg').startswith('404 Not Found')) self.assertTrue(dispatch_span.get_tag('error.stack').startswith('Traceback')) self.assertEqual(dispatch_span.get_tag('error.type'),", "tags self.assertEqual(req_span.get_tag('flask.endpoint'), 'index') # Note: contains no query string self.assertEqual(req_span.get_tag('flask.url_rule'),", "= spans[4] self.assertEqual(handler_span.service, 'flask') self.assertEqual(handler_span.name, 'tests.contrib.flask.test_request.fivehundredone') self.assertEqual(handler_span.resource, '/501') self.assertEqual(handler_span.error, 1)", "a request When the requested endpoint calls `abort(404)` We create", "0): self.assertEqual(len(spans), 11) # Assert the order of the spans", "self.assertEqual(res.data, b'Hello Flask') spans = self.get_spans() self.assertEqual(len(spans), 8) # Assert", "self.assertIsNone(span.get_metric(ANALYTICS_SAMPLE_RATE_KEY)) def test_analytics_global_off_integration_on(self): \"\"\" When making a request When an", "'Hello Flask', 200 # Default: distributed tracing enabled res =", "self.assertTrue(user_ex_span.get_tag('error.stack').startswith('Traceback')) self.assertEqual(user_ex_span.get_tag('error.type'), base_exception_name) def test_request_501(self): \"\"\" When making a request", "handler_span = spans[4] self.assertEqual(handler_span.service, 'flask') self.assertEqual(handler_span.name, 'tests.contrib.flask.test_request.unicode') self.assertEqual(handler_span.resource, u'/üŋïĉóđē') self.assertEqual(req_span.error,", "Request tags self.assertEqual(req_span.get_tag('http.method'), 'GET') self.assertEqual(req_span.get_tag(http.URL), 'http://localhost/not-found') assert_span_http_status_code(req_span, 404) self.assertEqual(req_span.get_tag('flask.endpoint'), 'not_found')", "'/500') # Dispatch span dispatch_span = spans[3] self.assertEqual(dispatch_span.service, 'flask') self.assertEqual(dispatch_span.name,", "'flask.dispatch_request') self.assertEqual(dispatch_span.error, 1) self.assertTrue(dispatch_span.get_tag('error.msg').startswith('500 error')) self.assertTrue(dispatch_span.get_tag('error.stack').startswith('Traceback')) self.assertEqual(dispatch_span.get_tag('error.type'), base_exception_name) # Handler", "self.assertEqual(handler_span.service, 'flask') self.assertEqual(handler_span.name, 'tests.contrib.flask.test_request.index') # Note: contains no query string", "user_ex_span = 
spans[5] self.assertEqual(user_ex_span.service, 'flask') self.assertEqual(user_ex_span.name, 'flask.handle_user_exception') self.assertEqual(user_ex_span.resource, 'flask.handle_user_exception') self.assertEqual(user_ex_span.error,", "self.get_root_span() root.assert_matches( name='flask.request', metrics={ ANALYTICS_SAMPLE_RATE_KEY: 1.0, }, ) for span", "self.assertEqual(res.status_code, 501) spans = self.get_spans() self.assertEqual(len(spans), 10) # Assert the", "and capture the query string.\"\"\" @self.app.route('/') def index(): return 'Hello", "self.assertEqual(handler_span.error, 1) self.assertTrue(handler_span.get_tag('error.msg').startswith('501 Not Implemented')) self.assertTrue(handler_span.get_tag('error.stack').startswith('Traceback')) self.assertEqual(handler_span.get_tag('error.type'), 'werkzeug.exceptions.NotImplemented') # User", "the expected spans \"\"\" @self.app.errorhandler(500) def error_handler(e): return 'Whoops', 500", "'flask.dispatch_request') self.assertEqual(dispatch_span.resource, 'flask.dispatch_request') self.assertEqual(dispatch_span.error, 1) self.assertTrue(dispatch_span.get_tag('error.msg').startswith('500 error')) self.assertTrue(dispatch_span.get_tag('error.stack').startswith('Traceback')) self.assertEqual(dispatch_span.get_tag('error.type'), base_exception_name)", "self.assertEqual(handler_span.service, 'flask') self.assertEqual(handler_span.name, 'tests.contrib.flask.test_request.index') self.assertEqual(handler_span.resource, '/') self.assertEqual(req_span.error, 0) def test_request_query_string_trace(self):", "678910) self.assertEqual(span.parent_id, 12345) # Explicitly enable distributed tracing with self.override_config('flask',", "self.assertEqual(req_span.resource, 'GET /') self.assertEqual(req_span.span_type, 'web') self.assertEqual(req_span.error, 0) self.assertIsNone(req_span.parent_id) # Request", "self.client.get('/') self.assertEqual(res.status_code, 200) self.assertEqual(res.data, b'Hello Flask') root = self.get_root_span() self.assertIsNone(root.get_metric(ANALYTICS_SAMPLE_RATE_KEY))", "= self.client.get(u'/üŋïĉóđē') self.assertEqual(res.status_code, 200) self.assertEqual(res.data, b'\\xc3\\xbc\\xc5\\x8b\\xc3\\xaf\\xc4\\x89\\xc3\\xb3\\xc4\\x91\\xc4\\x93') spans = self.get_spans() self.assertEqual(len(spans),", "self.assertEqual(req_span.error, 0) self.assertIsNone(req_span.parent_id) # Request tags self.assertEqual(req_span.get_tag('flask.endpoint'), 'unicode') self.assertEqual(req_span.get_tag('flask.url_rule'), u'/üŋïĉóđē')", "'flask.dispatch_request') self.assertEqual(dispatch_span.error, 1) self.assertTrue(dispatch_span.get_tag('error.msg').startswith('404 Not Found')) self.assertTrue(dispatch_span.get_tag('error.stack').startswith('Traceback')) self.assertEqual(dispatch_span.get_tag('error.type'), 'werkzeug.exceptions.NotFound') #", "if PY2: base_exception_name = 'exceptions.Exception' class FlaskRequestTestCase(BaseFlaskTestCase): def test_request(self): \"\"\"", "self.assertEqual(handler_span.name, 'tests.contrib.flask.test_request.fivehundred') self.assertEqual(handler_span.resource, '/500') self.assertEqual(handler_span.error, 1) self.assertTrue(handler_span.get_tag('error.msg').startswith('500 error')) self.assertTrue(handler_span.get_tag('error.stack').startswith('Traceback')) self.assertEqual(handler_span.get_tag('error.type'),", "disabled We expect the root span to have the appropriate", "u'GET /üŋïĉóđē') self.assertEqual(req_span.span_type, 'web') self.assertEqual(req_span.error, 0) self.assertIsNone(req_span.parent_id) 
# Request tags", "'flask.try_trigger_before_first_request_functions', 'flask.preprocess_request', 'flask.dispatch_request', 'tests.contrib.flask.test_request.fivehundredone', 'flask.handle_user_exception', 'flask.handle_http_exception', 'flask.process_response', 'flask.do_teardown_request', 'flask.do_teardown_appcontext', ],", "# Root request span req_span = spans[0] self.assertEqual(req_span.service, 'flask') self.assertEqual(req_span.name,", "test_distributed_tracing(self): \"\"\" When making a request When distributed tracing headers", "Request tags self.assertEqual(req_span.get_tag('flask.endpoint'), 'index') self.assertEqual(req_span.get_tag('flask.url_rule'), '/') self.assertEqual(req_span.get_tag('http.method'), 'GET') self.assertEqual(req_span.get_tag(http.URL), 'http://localhost/')", "/not-found') self.assertEqual(req_span.span_type, 'web') self.assertEqual(req_span.error, 0) self.assertIsNone(req_span.parent_id) # Request tags self.assertEqual(req_span.get_tag('http.method'),", "= self.client.get('/500') self.assertEqual(res.status_code, 500) self.assertEqual(res.data, b'Whoops') spans = self.get_spans() if", "b'Hello Flask') # Assert parent and trace id are properly", "on the root span span = self.find_span_by_name(self.get_spans(), 'flask.request') self.assertNotEqual(span.trace_id, 678910)", "'flask.do_teardown_appcontext', ], [s.name for s in spans], ) # Assert", "'flask.dispatch_request', 'flask.handle_user_exception', 'flask.handle_http_exception', 'flask.process_response', 'flask.do_teardown_request', 'flask.do_teardown_appcontext', ], [s.name for s", "(0, 12, 0): self.assertEqual(len(spans), 11) # Assert the order of", "Handler span handler_span = spans[4] self.assertEqual(handler_span.service, 'flask') self.assertEqual(handler_span.name, 'tests.contrib.flask.test_request.unicode') self.assertEqual(handler_span.resource,", "b'Hello Flask') root = self.get_root_span() root.assert_matches( name='flask.request', metrics={ ANALYTICS_SAMPLE_RATE_KEY: 0.5,", "'flask.handle_http_exception', 'flask.process_response', 'flask.do_teardown_request', 'flask.do_teardown_appcontext', ], [s.name for s in spans],", "continue self.assertIsNone(span.get_metric(ANALYTICS_SAMPLE_RATE_KEY)) def test_analytics_global_off_integration_on(self): \"\"\" When making a request When", "making a request that we create the expected spans and", "# Note: contains no query string self.assertEqual(req_span.resource, 'GET /') self.assertEqual(req_span.span_type,", "Not Implemented')) self.assertTrue(handler_span.get_tag('error.stack').startswith('Traceback')) self.assertEqual(handler_span.get_tag('error.type'), 'werkzeug.exceptions.NotImplemented') # User exception span user_ex_span", "test_request_404(self): \"\"\" When making a request When the requested endpoint", "making a request When the requested endpoint calls `abort(501)` We", "\"\"\" @self.app.route(u'/üŋïĉóđē') def unicode(): return 'üŋïĉóđē', 200 res = self.client.get(u'/üŋïĉóđē')", "When making a request We create the expected spans \"\"\"", "requested endpoint calls `abort(501)` We create the expected spans \"\"\"", "'index') # Note: contains no query string self.assertEqual(req_span.get_tag('flask.url_rule'), '/') self.assertEqual(req_span.get_tag('http.method'),", "'flask') self.assertEqual(dispatch_span.name, 'flask.dispatch_request') self.assertEqual(dispatch_span.resource, 'flask.dispatch_request') self.assertEqual(dispatch_span.error, 1) self.assertTrue(dispatch_span.get_tag('error.msg').startswith('501 Not Implemented'))", "span req_span 
= spans[0] self.assertEqual(req_span.service, 'flask') self.assertEqual(req_span.name, 'flask.request') # Note:", "'flask') # Root request span req_span = spans[0] self.assertEqual(req_span.service, 'flask')", "With distributed tracing disabled with self.override_config('flask', dict(distributed_tracing_enabled=False)): res = self.client.get('/',", "'flask.handle_user_exception', 'flask.handle_exception', 'tests.contrib.flask.test_request.error_handler', 'flask.process_response', 'flask.do_teardown_request', 'flask.do_teardown_appcontext', ], [s.name for s", "= self.client.get('/500') self.assertEqual(res.status_code, 500) spans = self.get_spans() self.assertEqual(len(spans), 9) #", "self.assertEqual(handler_span.resource, '/') self.assertEqual(req_span.error, 0) def test_request_query_string_trace(self): \"\"\"Make sure when making", "\"\"\" When making a request When distributed tracing headers are", "a request that we create the expected spans and capture", "'flask.do_teardown_request', 'flask.do_teardown_appcontext', ], [s.name for s in spans], ) #", "spans[0] self.assertEqual(req_span.service, 'flask') self.assertEqual(req_span.name, 'flask.request') self.assertEqual(req_span.resource, u'GET /üŋïĉóđē') self.assertEqual(req_span.span_type, 'web')", "# Handler span handler_span = spans[4] self.assertEqual(handler_span.service, 'flask') self.assertEqual(handler_span.name, 'tests.contrib.flask.test_request.index')", "Not Found')) self.assertTrue(handler_span.get_tag('error.stack').startswith('Traceback')) self.assertEqual(handler_span.get_tag('error.type'), 'werkzeug.exceptions.NotFound') def test_request_500(self): \"\"\" When making", "ANALYTICS_SAMPLE_RATE_KEY: 1.0, }, ) for span in self.spans: if span", "self.assertEqual(req_span.resource, 'GET /501') self.assertEqual(req_span.span_type, 'web') self.assertEqual(req_span.error, 1) self.assertIsNone(req_span.parent_id) # Request", "index(): return 'Hello Flask', 200 with self.override_global_config(dict(analytics_enabled=False)): with self.override_config('flask', dict(analytics_enabled=True,", "\"\"\" When making a request We create the expected spans", "'/500') self.assertEqual(handler_span.error, 1) self.assertTrue(handler_span.get_tag('error.msg').startswith('500 error')) self.assertTrue(handler_span.get_tag('error.stack').startswith('Traceback')) self.assertEqual(handler_span.get_tag('error.type'), base_exception_name) # User", "678910) self.assertEqual(span.parent_id, 12345) # With distributed tracing disabled with self.override_config('flask',", "500) self.assertEqual(res.data, b'Whoops') spans = self.get_spans() if flask_version >= (0,", "'flask') self.assertEqual(req_span.name, 'flask.request') self.assertEqual(req_span.resource, 'GET /') self.assertEqual(req_span.span_type, 'web') self.assertEqual(req_span.error, 0)", "def index(): return 'Hello Flask', 200 with self.override_global_config(dict(analytics_enabled=False)): res =", "Flask', 200 with self.override_global_config(dict(analytics_enabled=True)): res = self.client.get('/') self.assertEqual(res.status_code, 200) self.assertEqual(res.data,", "HTTP_HEADER_PARENT_ID: '12345', HTTP_HEADER_TRACE_ID: '678910', }) self.assertEqual(res.status_code, 200) self.assertEqual(res.data, b'Hello Flask')", "request We create the expected spans \"\"\" @self.app.route('/') def index():", "expected spans \"\"\" @self.app.route(u'/üŋïĉóđē') def unicode(): return 'üŋïĉóđē', 200 res", "create the expected spans \"\"\" @self.app.route('/') def index(): return 'Hello", "if span == root: 
continue self.assertIsNone(span.get_metric(ANALYTICS_SAMPLE_RATE_KEY)) def test_analytics_global_off_integration_on(self): \"\"\" When", "query string self.assertEqual(req_span.get_tag(http.URL), 'http://localhost/') assert_span_http_status_code(req_span, 200) # Handler span handler_span", "with self.override_global_config(dict(analytics_enabled=True)): with self.override_config('flask', dict(analytics_enabled=True, analytics_sample_rate=0.5)): res = self.client.get('/') self.assertEqual(res.status_code,", "import BaseFlaskTestCase from ...utils import assert_span_http_status_code base_exception_name = 'builtins.Exception' if", "'/not-found') # Dispatch span dispatch_span = spans[3] self.assertEqual(dispatch_span.service, 'flask') self.assertEqual(dispatch_span.name,", "When making a request When the requested endpoint calls `abort(404)`", "self.assertEqual(user_ex_span.error, 1) self.assertTrue(user_ex_span.get_tag('error.msg').startswith('500 error')) self.assertTrue(user_ex_span.get_tag('error.stack').startswith('Traceback')) self.assertEqual(user_ex_span.get_tag('error.type'), base_exception_name) def test_request_501(self): \"\"\"", "tracing headers are present We create the expected spans \"\"\"", "200 res = self.client.get('/') self.assertEqual(res.status_code, 200) self.assertEqual(res.data, b'Hello Flask') spans", "abort(404) res = self.client.get('/not-found') self.assertEqual(res.status_code, 404) spans = self.get_spans() self.assertEqual(len(spans),", "tracing enabled res = self.client.get('/', headers={ HTTP_HEADER_PARENT_ID: '12345', HTTP_HEADER_TRACE_ID: '678910',", "spans[3] self.assertEqual(dispatch_span.service, 'flask') self.assertEqual(dispatch_span.name, 'flask.dispatch_request') self.assertEqual(dispatch_span.resource, 'flask.dispatch_request') self.assertEqual(dispatch_span.error, 1) self.assertTrue(dispatch_span.get_tag('error.msg').startswith('500", "\"\"\" @self.app.errorhandler(500) def error_handler(e): return 'Whoops', 500 @self.app.route('/500') def fivehundred():", "fivehundredone(): abort(501) res = self.client.get('/501') self.assertEqual(res.status_code, 501) spans = self.get_spans()", "self.assertEqual(req_span.name, 'flask.request') self.assertEqual(req_span.resource, 'GET /500') self.assertEqual(req_span.span_type, 'web') self.assertEqual(req_span.error, 1) self.assertIsNone(req_span.parent_id)", "assert_span_http_status_code(req_span, 500) self.assertEqual(req_span.get_tag('flask.endpoint'), 'fivehundred') self.assertEqual(req_span.get_tag('flask.url_rule'), '/500') # Dispatch span dispatch_span", "'flask') self.assertEqual(user_ex_span.name, 'flask.handle_user_exception') self.assertEqual(user_ex_span.resource, 'flask.handle_user_exception') self.assertEqual(user_ex_span.error, 1) self.assertTrue(user_ex_span.get_tag('error.msg').startswith('500 error')) self.assertTrue(user_ex_span.get_tag('error.stack').startswith('Traceback'))", "Request tags self.assertEqual(req_span.get_tag('http.method'), 'GET') self.assertEqual(req_span.get_tag(http.URL), 'http://localhost/not-found') assert_span_http_status_code(req_span, 404) # Dispatch", "ddtrace.propagation.http import HTTP_HEADER_TRACE_ID, HTTP_HEADER_PARENT_ID from flask import abort from .", "root span to have the appropriate tag \"\"\" @self.app.route('/') def", "services for span in spans: self.assertEqual(span.service, 'flask') # Root request", "create the expected spans and capture the query string.\"\"\" @self.app.route('/')", "= self.get_root_span() root.assert_matches( name='flask.request', 
metrics={ ANALYTICS_SAMPLE_RATE_KEY: 1.0, }, ) for", "with self.override_global_config(dict(analytics_enabled=True)): res = self.client.get('/') self.assertEqual(res.status_code, 200) self.assertEqual(res.data, b'Hello Flask')", "'flask.handle_user_exception') self.assertEqual(user_ex_span.resource, 'flask.handle_user_exception') self.assertEqual(user_ex_span.error, 1) self.assertTrue(user_ex_span.get_tag('error.msg').startswith('500 error')) self.assertTrue(user_ex_span.get_tag('error.stack').startswith('Traceback')) self.assertEqual(user_ex_span.get_tag('error.type'), base_exception_name)", "span handler_span = spans[4] self.assertEqual(handler_span.service, 'flask') self.assertEqual(handler_span.name, 'tests.contrib.flask.test_request.index') self.assertEqual(handler_span.resource, '/')", "501) spans = self.get_spans() self.assertEqual(len(spans), 10) # Assert the order", "'flask.dispatch_request') self.assertEqual(dispatch_span.resource, 'flask.dispatch_request') self.assertEqual(dispatch_span.error, 1) self.assertTrue(dispatch_span.get_tag('error.msg').startswith('501 Not Implemented')) self.assertTrue(dispatch_span.get_tag('error.stack').startswith('Traceback')) self.assertEqual(dispatch_span.get_tag('error.type'),", "= spans[0] self.assertEqual(req_span.service, 'flask') self.assertEqual(req_span.name, 'flask.request') # Note: contains no", "index(): return 'Hello Flask', 200 with self.override_http_config('flask', dict(trace_query_string=True)): self.client.get('/?foo=bar&baz=biz') spans", "from ddtrace.contrib.flask.patch import flask_version from ddtrace.ext import http from ddtrace.propagation.http", "def index(): return 'Hello Flask', 200 res = self.client.get('/', query_string=dict(hello='flask'))", "set on the root span span = self.find_span_by_name(self.get_spans(), 'flask.request') self.assertNotEqual(span.trace_id,", "create the expected spans \"\"\" @self.app.route(u'/üŋïĉóđē') def unicode(): return 'üŋïĉóđē',", "http from ddtrace.propagation.http import HTTP_HEADER_TRACE_ID, HTTP_HEADER_PARENT_ID from flask import abort", "self.assertEqual(span.trace_id, 678910) self.assertEqual(span.parent_id, 12345) # With distributed tracing disabled with", "'flask') self.assertEqual(handler_span.name, 'tests.contrib.flask.test_request.index') # Note: contains no query string self.assertEqual(handler_span.resource,", "= spans[4] self.assertEqual(handler_span.service, 'flask') self.assertEqual(handler_span.name, 'tests.contrib.flask.test_request.index') # Note: contains no", "Found')) self.assertTrue(dispatch_span.get_tag('error.stack').startswith('Traceback')) self.assertEqual(dispatch_span.get_tag('error.type'), 'werkzeug.exceptions.NotFound') def test_request_abort_404(self): \"\"\" When making a", "@self.app.route('/500') def fivehundred(): raise Exception('500 error') res = self.client.get('/500') self.assertEqual(res.status_code,", "spans \"\"\" @self.app.route('/501') def fivehundredone(): abort(501) res = self.client.get('/501') self.assertEqual(res.status_code,", "200) assert http.QUERY_STRING not in req_span.meta # Handler span handler_span", "# Handler span handler_span = spans[4] self.assertEqual(handler_span.service, 'flask') self.assertEqual(handler_span.name, 'tests.contrib.flask.test_request.not_found')", "the root span to not include tag \"\"\" @self.app.route('/') def", "'üŋïĉóđē', 200 res = self.client.get(u'/üŋïĉóđē') self.assertEqual(res.status_code, 200) self.assertEqual(res.data, b'\\xc3\\xbc\\xc5\\x8b\\xc3\\xaf\\xc4\\x89\\xc3\\xb3\\xc4\\x91\\xc4\\x93') spans", 
"self.assertIsNone(req_span.parent_id) # Request tags self.assertEqual(req_span.get_tag('http.method'), 'GET') self.assertEqual(req_span.get_tag(http.URL), 'http://localhost/500') assert_span_http_status_code(req_span, 500)", "@self.app.route('/') def index(): return 'Hello Flask', 200 res = self.client.get('/')", "@self.app.route('/not-found') def not_found(): abort(404) res = self.client.get('/not-found') self.assertEqual(res.status_code, 404) spans", "= self.client.get('/not-found') self.assertEqual(res.status_code, 404) spans = self.get_spans() self.assertEqual(len(spans), 10) #", "self.assertEqual(dispatch_span.service, 'flask') self.assertEqual(dispatch_span.name, 'flask.dispatch_request') self.assertEqual(dispatch_span.resource, 'flask.dispatch_request') self.assertEqual(dispatch_span.error, 1) self.assertTrue(dispatch_span.get_tag('error.msg').startswith('404 Not", "\"\"\" @self.app.route('/') def index(): return 'Hello Flask', 200 with self.override_global_config(dict(analytics_enabled=True)):", "u'/üŋïĉóđē') self.assertEqual(req_span.get_tag('http.method'), 'GET') self.assertEqual(req_span.get_tag(http.URL), u'http://localhost/üŋïĉóđē') assert_span_http_status_code(req_span, 200) # Handler span", "Not Implemented')) self.assertTrue(dispatch_span.get_tag('error.stack').startswith('Traceback')) self.assertEqual(dispatch_span.get_tag('error.type'), 'werkzeug.exceptions.NotImplemented') # Handler span handler_span =", "search is enabled We expect the root span to have", "a request When an integration trace search is enabled and", "base_exception_name = 'exceptions.Exception' class FlaskRequestTestCase(BaseFlaskTestCase): def test_request(self): \"\"\" When making", "# Assert parent and trace id are properly set on", "self.assertIsNone(req_span.parent_id) # Request tags self.assertEqual(req_span.get_tag('flask.endpoint'), 'unicode') self.assertEqual(req_span.get_tag('flask.url_rule'), u'/üŋïĉóđē') self.assertEqual(req_span.get_tag('http.method'), 'GET')", "self.assertEqual(res.status_code, 200) self.assertEqual(res.data, b'\\xc3\\xbc\\xc5\\x8b\\xc3\\xaf\\xc4\\x89\\xc3\\xb3\\xc4\\x91\\xc4\\x93') spans = self.get_spans() self.assertEqual(len(spans), 8) #", "[s.name for s in spans], ) else: self.assertEqual(len(spans), 10) #", "endpoint was not found We create the expected spans \"\"\"", "self.assertEqual(span.service, 'flask') # Root request span req_span = spans[0] self.assertEqual(req_span.service,", "'flask') self.assertEqual(handler_span.name, 'tests.contrib.flask.test_request.index') self.assertEqual(handler_span.resource, '/') self.assertEqual(req_span.error, 0) def test_request_query_string_trace(self): \"\"\"Make", "# Handler span handler_span = spans[4] self.assertEqual(handler_span.service, 'flask') self.assertEqual(handler_span.name, 'tests.contrib.flask.test_request.fivehundred')", "spans = self.get_spans() self.assertEqual(len(spans), 8) # Assert the order of", "trace id are properly set on the root span span", "spans = self.get_spans() self.assertEqual(len(spans), 10) # Assert the order of", "assert_span_http_status_code base_exception_name = 'builtins.Exception' if PY2: base_exception_name = 'exceptions.Exception' class", "'tests.contrib.flask.test_request.fivehundred') self.assertEqual(handler_span.resource, '/500') self.assertEqual(handler_span.error, 1) self.assertTrue(handler_span.get_tag('error.msg').startswith('500 error')) self.assertTrue(handler_span.get_tag('error.stack').startswith('Traceback')) self.assertEqual(handler_span.get_tag('error.type'), 
base_exception_name)", "self.get_root_span() self.assertIsNone(root.get_metric(ANALYTICS_SAMPLE_RATE_KEY)) for span in self.spans: if span == root:", "= self.client.get('/501') self.assertEqual(res.status_code, 501) spans = self.get_spans() self.assertEqual(len(spans), 10) #", "0) def test_request_error_handler(self): \"\"\" When making a request When the", "self.assertEqual(handler_span.service, 'flask') self.assertEqual(handler_span.name, 'tests.contrib.flask.test_request.unicode') self.assertEqual(handler_span.resource, u'/üŋïĉóđē') self.assertEqual(req_span.error, 0) def test_request_404(self):", "with self.override_global_config(dict(analytics_enabled=False)): with self.override_config('flask', dict(analytics_enabled=True, analytics_sample_rate=0.5)): res = self.client.get('/') self.assertEqual(res.status_code,", "self.assertEqual(req_span.get_tag(http.URL), 'http://localhost/not-found') assert_span_http_status_code(req_span, 404) # Dispatch span dispatch_span = spans[3]", "if span == root: continue self.assertIsNone(span.get_metric(ANALYTICS_SAMPLE_RATE_KEY)) def test_analytics_global_on_integration_on(self): \"\"\" When", "When making a request When an integration trace search is", "Request tags self.assertEqual(req_span.get_tag('http.method'), 'GET') self.assertEqual(req_span.get_tag(http.URL), 'http://localhost/501') assert_span_http_status_code(req_span, 501) self.assertEqual(req_span.get_tag('flask.endpoint'), 'fivehundredone')", "200 with self.override_global_config(dict(analytics_enabled=True)): res = self.client.get('/') self.assertEqual(res.status_code, 200) self.assertEqual(res.data, b'Hello", "query_string=dict(hello='flask')) self.assertEqual(res.status_code, 200) self.assertEqual(res.data, b'Hello Flask') spans = self.get_spans() self.assertEqual(len(spans),", "ANALYTICS_SAMPLE_RATE_KEY: 0.5, }, ) for span in self.spans: if span", "PY2: base_exception_name = 'exceptions.Exception' class FlaskRequestTestCase(BaseFlaskTestCase): def test_request(self): \"\"\" When", "self.assertEqual(res.status_code, 200) self.assertEqual(res.data, b'Hello Flask') # Assert parent and trace", "678910) self.assertIsNone(span.parent_id) def test_request_query_string(self): \"\"\" When making a request When", "= self.client.get('/') self.assertEqual(res.status_code, 200) self.assertEqual(res.data, b'Hello Flask') root = self.get_root_span()", "self.assertEqual(handler_span.error, 1) self.assertTrue(handler_span.get_tag('error.msg').startswith('500 error')) self.assertTrue(handler_span.get_tag('error.stack').startswith('Traceback')) self.assertEqual(handler_span.get_tag('error.type'), base_exception_name) # User exception", "self.assertEqual(req_span.get_tag(http.URL), 'http://localhost/501') assert_span_http_status_code(req_span, 501) self.assertEqual(req_span.get_tag('flask.endpoint'), 'fivehundredone') self.assertEqual(req_span.get_tag('flask.url_rule'), '/501') # Dispatch", "string.\"\"\" @self.app.route('/') def index(): return 'Hello Flask', 200 with self.override_http_config('flask',", "@self.app.route('/') def index(): return 'Hello Flask', 200 with self.override_http_config('flask', dict(trace_query_string=True)):", "'Hello Flask', 200 res = self.client.get('/', query_string=dict(hello='flask')) self.assertEqual(res.status_code, 200) self.assertEqual(res.data,", "'flask.preprocess_request', 'flask.dispatch_request', 'tests.contrib.flask.test_request.unicode', 'flask.process_response', 'flask.do_teardown_request', 'flask.do_teardown_appcontext', ], [s.name for s", "200) 
self.assertEqual(res.data, b'Hello Flask') spans = self.get_spans() self.assertEqual(len(spans), 8) #", "= self.client.get('/not-found') self.assertEqual(res.status_code, 404) spans = self.get_spans() self.assertEqual(len(spans), 9) #", "self.assertIsNone(span.get_metric(ANALYTICS_SAMPLE_RATE_KEY)) def test_analytics_global_on_integration_on(self): \"\"\" When making a request When an", "8) # Assert the order of the spans created self.assertListEqual(", "query string self.assertEqual(handler_span.resource, '/') self.assertEqual(req_span.error, 0) def test_request_unicode(self): \"\"\" When", "handler_span = spans[4] self.assertEqual(handler_span.service, 'flask') self.assertEqual(handler_span.name, 'tests.contrib.flask.test_request.index') # Note: contains", "self.assertEqual(req_span.name, 'flask.request') self.assertEqual(req_span.resource, u'GET /üŋïĉóđē') self.assertEqual(req_span.span_type, 'web') self.assertEqual(req_span.error, 0) self.assertIsNone(req_span.parent_id)", "self.assertEqual(req_span.get_tag(http.URL), 'http://localhost/not-found') assert_span_http_status_code(req_span, 404) self.assertEqual(req_span.get_tag('flask.endpoint'), 'not_found') self.assertEqual(req_span.get_tag('flask.url_rule'), '/not-found') # Dispatch", "We create the expected spans \"\"\" @self.app.route('/501') def fivehundredone(): abort(501)", "spans created self.assertListEqual( [ 'flask.request', 'flask.try_trigger_before_first_request_functions', 'flask.preprocess_request', 'flask.dispatch_request', 'tests.contrib.flask.test_request.fivehundredone', 'flask.handle_user_exception',", "'flask') self.assertEqual(req_span.name, 'flask.request') self.assertEqual(req_span.resource, 'GET /not-found') self.assertEqual(req_span.span_type, 'web') self.assertEqual(req_span.error, 0)", "self.get_root_span() root.assert_matches( name='flask.request', metrics={ ANALYTICS_SAMPLE_RATE_KEY: 0.5, }, ) for span", "error') res = self.client.get('/500') self.assertEqual(res.status_code, 500) self.assertEqual(res.data, b'Whoops') spans =", "= self.get_spans() self.assertEqual(len(spans), 9) # Assert the order of the", "for s in spans], ) else: self.assertEqual(len(spans), 10) # Assert", "Exception('500 error') res = self.client.get('/500') self.assertEqual(res.status_code, 500) self.assertEqual(res.data, b'Whoops') spans", "search is disabled We expect the root span to not", "'exceptions.Exception' class FlaskRequestTestCase(BaseFlaskTestCase): def test_request(self): \"\"\" When making a request", "request span req_span = spans[0] self.assertEqual(req_span.service, 'flask') self.assertEqual(req_span.name, 'flask.request') self.assertEqual(req_span.resource,", "is not event sample rate is not set and globally", "'web') self.assertEqual(req_span.error, 0) self.assertIsNone(req_span.parent_id) # Request tags self.assertEqual(req_span.get_tag('flask.endpoint'), 'unicode') self.assertEqual(req_span.get_tag('flask.url_rule'),", "created self.assertListEqual( [ 'flask.request', 'flask.try_trigger_before_first_request_functions', 'flask.preprocess_request', 'flask.dispatch_request', 'flask.handle_user_exception', 'flask.handle_http_exception', 'flask.process_response',", "raise Exception('500 error') res = self.client.get('/500') self.assertEqual(res.status_code, 500) spans =", "self.assertEqual(user_ex_span.resource, 'flask.handle_user_exception') self.assertEqual(user_ex_span.error, 0) def test_request_error_handler(self): \"\"\" When making a", "are properly set on the root span span = 
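
    # ------------------------------------------------------------------
    # Editor's sketch (not part of the original module): the 'flask.*'
    # spans asserted above come from ddtrace's Flask instrumentation.
    # A minimal version of the setup BaseFlaskTestCase is assumed to
    # perform, via ddtrace's public patch() entry point:
    #
    #     from ddtrace import patch
    #     patch(flask=True)            # patch before ``import flask``
    #
    #     import flask
    #     app = flask.Flask(__name__)
    #     client = app.test_client()   # requests now emit 'flask.*' spans
    # ------------------------------------------------------------------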

    def test_request_query_string_trace(self):
        """Make sure when making a request that we create the expected spans and capture the query string."""
        @self.app.route('/')
        def index():
            return 'Hello Flask', 200

        with self.override_http_config('flask', dict(trace_query_string=True)):
            self.client.get('/?foo=bar&baz=biz')
        spans = self.get_spans()

        # Request tags
        assert spans[0].get_tag(http.QUERY_STRING) == 'foo=bar&baz=biz'

    def test_analytics_global_on_integration_default(self):
        """
        When making a request
            When an integration trace search is not set and sample rate is not set and globally trace search is enabled
                We expect the root span to have the appropriate tag
        """
        @self.app.route('/')
        def index():
            return 'Hello Flask', 200

        with self.override_global_config(dict(analytics_enabled=True)):
            res = self.client.get('/')
            self.assertEqual(res.status_code, 200)
            self.assertEqual(res.data, b'Hello Flask')

        root = self.get_root_span()
        root.assert_matches(
            name='flask.request',
            metrics={
                ANALYTICS_SAMPLE_RATE_KEY: 1.0,
            },
        )

        for span in self.spans:
            if span == root:
                continue
            self.assertIsNone(span.get_metric(ANALYTICS_SAMPLE_RATE_KEY))

    def test_analytics_global_on_integration_on(self):
        """
        When making a request
            When an integration trace search is enabled and sample rate is set and globally trace search is enabled
                We expect the root span to have the appropriate tag
        """
        @self.app.route('/')
        def index():
            return 'Hello Flask', 200

        with self.override_global_config(dict(analytics_enabled=True)):
            with self.override_config('flask', dict(analytics_enabled=True, analytics_sample_rate=0.5)):
                res = self.client.get('/')
                self.assertEqual(res.status_code, 200)
                self.assertEqual(res.data, b'Hello Flask')

        root = self.get_root_span()
        root.assert_matches(
            name='flask.request',
            metrics={
                ANALYTICS_SAMPLE_RATE_KEY: 0.5,
            },
        )

        for span in self.spans:
            if span == root:
                continue
            self.assertIsNone(span.get_metric(ANALYTICS_SAMPLE_RATE_KEY))

    def test_analytics_global_off_integration_default(self):
        """
        When making a request
            When an integration trace search is not set and sample rate is not set and globally trace search is disabled
                We expect the root span to not include tag
        """
        @self.app.route('/')
        def index():
            return 'Hello Flask', 200

        with self.override_global_config(dict(analytics_enabled=False)):
            res = self.client.get('/')
            self.assertEqual(res.status_code, 200)
            self.assertEqual(res.data, b'Hello Flask')

        root = self.get_root_span()
        self.assertIsNone(root.get_metric(ANALYTICS_SAMPLE_RATE_KEY))
        for span in self.spans:
            if span == root:
                continue
            self.assertIsNone(span.get_metric(ANALYTICS_SAMPLE_RATE_KEY))

    def test_analytics_global_off_integration_on(self):
        """
        When making a request
            When an integration trace search is enabled and sample rate is set and globally trace search is disabled
                We expect the root span to have the appropriate tag
        """
        @self.app.route('/')
        def index():
            return 'Hello Flask', 200

        with self.override_global_config(dict(analytics_enabled=False)):
            with self.override_config('flask', dict(analytics_enabled=True, analytics_sample_rate=0.5)):
                res = self.client.get('/')
                self.assertEqual(res.status_code, 200)
                self.assertEqual(res.data, b'Hello Flask')

        root = self.get_root_span()
        root.assert_matches(
            name='flask.request',
            metrics={
                ANALYTICS_SAMPLE_RATE_KEY: 0.5,
            },
        )

        for span in self.spans:
            if span == root:
                continue
            self.assertIsNone(span.get_metric(ANALYTICS_SAMPLE_RATE_KEY))
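
    # Editor's sketch (assumption, not part of the original module): outside
    # of tests, the override_config()/override_global_config() helpers above
    # correspond to setting ddtrace's config directly; the key names are
    # taken from the overrides exercised by these tests:
    #
    #     from ddtrace import config
    #     config.analytics_enabled = True                  # global switch
    #     config.flask['analytics_enabled'] = True         # per integration
    #     config.flask['analytics_sample_rate'] = 0.5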

    def test_distributed_tracing(self):
        """
        When making a request
            When distributed tracing headers are present
                We create the expected spans
        """
        @self.app.route('/')
        def index():
            return 'Hello Flask', 200

        # Default: distributed tracing enabled
        res = self.client.get('/', headers={
            HTTP_HEADER_PARENT_ID: '12345',
            HTTP_HEADER_TRACE_ID: '678910',
        })
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res.data, b'Hello Flask')

        # Assert parent and trace id are properly set on the root span
        span = self.find_span_by_name(self.get_spans(), 'flask.request')
        self.assertEqual(span.trace_id, 678910)
        self.assertEqual(span.parent_id, 12345)

        # Explicitly enable distributed tracing
        with self.override_config('flask', dict(distributed_tracing_enabled=True)):
            res = self.client.get('/', headers={
                HTTP_HEADER_PARENT_ID: '12345',
                HTTP_HEADER_TRACE_ID: '678910',
            })
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res.data, b'Hello Flask')

        # Assert parent and trace id are properly set on the root span
        span = self.find_span_by_name(self.get_spans(), 'flask.request')
        self.assertEqual(span.trace_id, 678910)
        self.assertEqual(span.parent_id, 12345)

        # With distributed tracing disabled
        with self.override_config('flask', dict(distributed_tracing_enabled=False)):
            res = self.client.get('/', headers={
                HTTP_HEADER_PARENT_ID: '12345',
                HTTP_HEADER_TRACE_ID: '678910',
            })
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res.data, b'Hello Flask')

        # Assert parent and trace id are not set on the root span
        span = self.find_span_by_name(self.get_spans(), 'flask.request')
        self.assertNotEqual(span.trace_id, 678910)
        self.assertIsNone(span.parent_id)
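
    # Editor's note (illustrative; header values are an assumption based on
    # ddtrace.propagation.http): HTTP_HEADER_TRACE_ID and HTTP_HEADER_PARENT_ID
    # are Datadog's propagation headers, so an upstream client joins the
    # trace with plain HTTP headers, e.g.:
    #
    #     requests.get('http://localhost/', headers={
    #         'x-datadog-trace-id': '678910',
    #         'x-datadog-parent-id': '12345',
    #     })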

    def test_request_query_string(self):
        """
        When making a request
            When the request contains a query string
                We create the expected spans
        """
        @self.app.route('/')
        def index():
            return 'Hello Flask', 200

        res = self.client.get('/', query_string=dict(hello='flask'))
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res.data, b'Hello Flask')

        spans = self.get_spans()
        self.assertEqual(len(spans), 8)

        # Assert the order of the spans created
        self.assertListEqual(
            [
                'flask.request',
                'flask.try_trigger_before_first_request_functions',
                'flask.preprocess_request',
                'flask.dispatch_request',
                'tests.contrib.flask.test_request.index',
                'flask.process_response',
                'flask.do_teardown_request',
                'flask.do_teardown_appcontext',
            ],
            [s.name for s in spans],
        )

        # Assert span services
        for span in spans:
            self.assertEqual(span.service, 'flask')

        # Root request span
        req_span = spans[0]
        self.assertEqual(req_span.service, 'flask')
        self.assertEqual(req_span.name, 'flask.request')
        # Note: contains no query string
        self.assertEqual(req_span.resource, 'GET /')
        self.assertEqual(req_span.span_type, 'web')
        self.assertEqual(req_span.error, 0)
        self.assertIsNone(req_span.parent_id)

        # Request tags
        self.assertEqual(req_span.get_tag('flask.endpoint'), 'index')
        # Note: contains no query string
        self.assertEqual(req_span.get_tag('flask.url_rule'), '/')
        self.assertEqual(req_span.get_tag('http.method'), 'GET')
        # Note: contains no query string
        self.assertEqual(req_span.get_tag(http.URL), 'http://localhost/')
        assert_span_http_status_code(req_span, 200)

        # Handler span
        handler_span = spans[4]
        self.assertEqual(handler_span.service, 'flask')
        self.assertEqual(handler_span.name, 'tests.contrib.flask.test_request.index')
        # Note: contains no query string
        self.assertEqual(handler_span.resource, '/')
        self.assertEqual(req_span.error, 0)

    def test_request_unicode(self):
        """
        When making a request
            When the url contains unicode
                We create the expected spans
        """
        @self.app.route(u'/üŋïĉóđē')
        def unicode():
            return 'üŋïĉóđē', 200

        res = self.client.get(u'/üŋïĉóđē')
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res.data, b'\xc3\xbc\xc5\x8b\xc3\xaf\xc4\x89\xc3\xb3\xc4\x91\xc4\x93')

        spans = self.get_spans()
        self.assertEqual(len(spans), 8)

        # Assert the order of the spans created
        self.assertListEqual(
            [
                'flask.request',
                'flask.try_trigger_before_first_request_functions',
                'flask.preprocess_request',
                'flask.dispatch_request',
                'tests.contrib.flask.test_request.unicode',
                'flask.process_response',
                'flask.do_teardown_request',
                'flask.do_teardown_appcontext',
            ],
            [s.name for s in spans],
        )

        # Assert span services
        for span in spans:
            self.assertEqual(span.service, 'flask')

        # Root request span
        req_span = spans[0]
        self.assertEqual(req_span.service, 'flask')
        self.assertEqual(req_span.name, 'flask.request')
        self.assertEqual(req_span.resource, u'GET /üŋïĉóđē')
        self.assertEqual(req_span.span_type, 'web')
        self.assertEqual(req_span.error, 0)
        self.assertIsNone(req_span.parent_id)

        # Request tags
        self.assertEqual(req_span.get_tag('flask.endpoint'), 'unicode')
        self.assertEqual(req_span.get_tag('flask.url_rule'), u'/üŋïĉóđē')
        self.assertEqual(req_span.get_tag('http.method'), 'GET')
        self.assertEqual(req_span.get_tag(http.URL), u'http://localhost/üŋïĉóđē')
        assert_span_http_status_code(req_span, 200)

        # Handler span
        handler_span = spans[4]
        self.assertEqual(handler_span.service, 'flask')
        self.assertEqual(handler_span.name, 'tests.contrib.flask.test_request.unicode')
        self.assertEqual(handler_span.resource, u'/üŋïĉóđē')
        self.assertEqual(req_span.error, 0)
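
    # Editor's note: the repeated "contains no query string" comments above
    # pin down deliberate behavior -- the resource name, flask.url_rule and
    # http.URL tags all strip the query string; the raw query is only
    # recorded (as http.QUERY_STRING) when trace_query_string is enabled,
    # as exercised in test_request_query_string_trace above.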

    def test_request_404(self):
        """
        When making a request
            When the requested endpoint was not found
                We create the expected spans
        """
        res = self.client.get('/not-found')
        self.assertEqual(res.status_code, 404)

        spans = self.get_spans()
        self.assertEqual(len(spans), 9)

        # Assert the order of the spans created
        self.assertListEqual(
            [
                'flask.request',
                'flask.try_trigger_before_first_request_functions',
                'flask.preprocess_request',
                'flask.dispatch_request',
                'flask.handle_user_exception',
                'flask.handle_http_exception',
                'flask.process_response',
                'flask.do_teardown_request',
                'flask.do_teardown_appcontext',
            ],
            [s.name for s in spans],
        )

        # Assert span services
        for span in spans:
            self.assertEqual(span.service, 'flask')

        # Root request span
        req_span = spans[0]
        self.assertEqual(req_span.service, 'flask')
        self.assertEqual(req_span.name, 'flask.request')
        self.assertEqual(req_span.resource, 'GET 404')
        self.assertEqual(req_span.span_type, 'web')
        self.assertEqual(req_span.error, 0)
        self.assertIsNone(req_span.parent_id)

        # Request tags
        self.assertEqual(req_span.get_tag('http.method'), 'GET')
        self.assertEqual(req_span.get_tag(http.URL), 'http://localhost/not-found')
        assert_span_http_status_code(req_span, 404)

        # Dispatch span
        dispatch_span = spans[3]
        self.assertEqual(dispatch_span.service, 'flask')
        self.assertEqual(dispatch_span.name, 'flask.dispatch_request')
        self.assertEqual(dispatch_span.resource, 'flask.dispatch_request')
        self.assertEqual(dispatch_span.error, 1)
        self.assertTrue(dispatch_span.get_tag('error.msg').startswith('404 Not Found'))
        self.assertTrue(dispatch_span.get_tag('error.stack').startswith('Traceback'))
        self.assertEqual(dispatch_span.get_tag('error.type'), 'werkzeug.exceptions.NotFound')

    def test_request_abort_404(self):
        """
        When making a request
            When the requested endpoint calls `abort(404)`
                We create the expected spans
        """
        @self.app.route('/not-found')
        def not_found():
            abort(404)

        res = self.client.get('/not-found')
        self.assertEqual(res.status_code, 404)

        spans = self.get_spans()
        self.assertEqual(len(spans), 10)

        # Assert the order of the spans created
        self.assertListEqual(
            [
                'flask.request',
                'flask.try_trigger_before_first_request_functions',
                'flask.preprocess_request',
                'flask.dispatch_request',
                'tests.contrib.flask.test_request.not_found',
                'flask.handle_user_exception',
                'flask.handle_http_exception',
                'flask.process_response',
                'flask.do_teardown_request',
                'flask.do_teardown_appcontext',
            ],
            [s.name for s in spans],
        )

        # Assert span services
        for span in spans:
            self.assertEqual(span.service, 'flask')

        # Root request span
        req_span = spans[0]
        self.assertEqual(req_span.service, 'flask')
        self.assertEqual(req_span.name, 'flask.request')
        self.assertEqual(req_span.resource, 'GET /not-found')
        self.assertEqual(req_span.span_type, 'web')
        self.assertEqual(req_span.error, 0)
        self.assertIsNone(req_span.parent_id)

        # Request tags
        self.assertEqual(req_span.get_tag('http.method'), 'GET')
        self.assertEqual(req_span.get_tag(http.URL), 'http://localhost/not-found')
        assert_span_http_status_code(req_span, 404)
        self.assertEqual(req_span.get_tag('flask.endpoint'), 'not_found')
        self.assertEqual(req_span.get_tag('flask.url_rule'), '/not-found')

        # Dispatch span
        dispatch_span = spans[3]
        self.assertEqual(dispatch_span.service, 'flask')
        self.assertEqual(dispatch_span.name, 'flask.dispatch_request')
        self.assertEqual(dispatch_span.resource, 'flask.dispatch_request')
        self.assertEqual(dispatch_span.error, 1)
        self.assertTrue(dispatch_span.get_tag('error.msg').startswith('404 Not Found'))
        self.assertTrue(dispatch_span.get_tag('error.stack').startswith('Traceback'))
        self.assertEqual(dispatch_span.get_tag('error.type'), 'werkzeug.exceptions.NotFound')

        # Handler span
        handler_span = spans[4]
        self.assertEqual(handler_span.service, 'flask')
        self.assertEqual(handler_span.name, 'tests.contrib.flask.test_request.not_found')
        self.assertEqual(handler_span.resource, '/not-found')
        self.assertEqual(handler_span.error, 1)
        self.assertTrue(handler_span.get_tag('error.msg').startswith('404 Not Found'))
        self.assertTrue(handler_span.get_tag('error.stack').startswith('Traceback'))
        self.assertEqual(handler_span.get_tag('error.type'), 'werkzeug.exceptions.NotFound')
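
    # Editor's note: for HTTP errors Flask funnels the exception through
    # handle_user_exception and then handle_http_exception, which is why both
    # span names appear above and why error.type carries the dotted path of
    # the werkzeug exception class, e.g.:
    #
    #     from werkzeug.exceptions import NotFound
    #     # abort(404) raises NotFound -> error.type == 'werkzeug.exceptions.NotFound'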

    def test_request_500(self):
        """
        When making a request
            When the requested endpoint raises an exception
                We create the expected spans
        """
        @self.app.route('/500')
        def fivehundred():
            raise Exception('500 error')

        res = self.client.get('/500')
        self.assertEqual(res.status_code, 500)

        spans = self.get_spans()
        self.assertEqual(len(spans), 9)

        # Assert the order of the spans created
        self.assertListEqual(
            [
                'flask.request',
                'flask.try_trigger_before_first_request_functions',
                'flask.preprocess_request',
                'flask.dispatch_request',
                'tests.contrib.flask.test_request.fivehundred',
                'flask.handle_user_exception',
                'flask.handle_exception',
                'flask.do_teardown_request',
                'flask.do_teardown_appcontext',
            ],
            [s.name for s in spans],
        )

        # Assert span services
        for span in spans:
            self.assertEqual(span.service, 'flask')

        # Root request span
        req_span = spans[0]
        self.assertEqual(req_span.service, 'flask')
        self.assertEqual(req_span.name, 'flask.request')
        self.assertEqual(req_span.resource, 'GET /500')
        self.assertEqual(req_span.span_type, 'web')
        self.assertEqual(req_span.error, 1)
        self.assertIsNone(req_span.parent_id)

        # Request tags
        self.assertEqual(req_span.get_tag('http.method'), 'GET')
        self.assertEqual(req_span.get_tag(http.URL), 'http://localhost/500')
        assert_span_http_status_code(req_span, 500)
        self.assertEqual(req_span.get_tag('flask.endpoint'), 'fivehundred')
        self.assertEqual(req_span.get_tag('flask.url_rule'), '/500')

        # Dispatch span
        dispatch_span = spans[3]
        self.assertEqual(dispatch_span.service, 'flask')
        self.assertEqual(dispatch_span.name, 'flask.dispatch_request')
        self.assertEqual(dispatch_span.resource, 'flask.dispatch_request')
        self.assertEqual(dispatch_span.error, 1)
        self.assertTrue(dispatch_span.get_tag('error.msg').startswith('500 error'))
        self.assertTrue(dispatch_span.get_tag('error.stack').startswith('Traceback'))
        self.assertEqual(dispatch_span.get_tag('error.type'), base_exception_name)

        # Handler span
        handler_span = spans[4]
        self.assertEqual(handler_span.service, 'flask')
        self.assertEqual(handler_span.name, 'tests.contrib.flask.test_request.fivehundred')
        self.assertEqual(handler_span.resource, '/500')
        self.assertEqual(handler_span.error, 1)
        self.assertTrue(handler_span.get_tag('error.msg').startswith('500 error'))
        self.assertTrue(handler_span.get_tag('error.stack').startswith('Traceback'))
        self.assertEqual(handler_span.get_tag('error.type'), base_exception_name)

        # User exception span
        user_ex_span = spans[5]
        self.assertEqual(user_ex_span.service, 'flask')
        self.assertEqual(user_ex_span.name, 'flask.handle_user_exception')
        self.assertEqual(user_ex_span.resource, 'flask.handle_user_exception')
        self.assertEqual(user_ex_span.error, 1)
        self.assertTrue(user_ex_span.get_tag('error.msg').startswith('500 error'))
        self.assertTrue(user_ex_span.get_tag('error.stack').startswith('Traceback'))
        self.assertEqual(user_ex_span.get_tag('error.type'), base_exception_name)

    def test_request_501(self):
        """
        When making a request
            When the requested endpoint calls `abort(501)`
                We create the expected spans
        """
        @self.app.route('/501')
        def fivehundredone():
            abort(501)

        res = self.client.get('/501')
        self.assertEqual(res.status_code, 501)

        spans = self.get_spans()
        self.assertEqual(len(spans), 10)

        # Assert the order of the spans created
        self.assertListEqual(
            [
                'flask.request',
                'flask.try_trigger_before_first_request_functions',
                'flask.preprocess_request',
                'flask.dispatch_request',
                'tests.contrib.flask.test_request.fivehundredone',
                'flask.handle_user_exception',
                'flask.handle_http_exception',
                'flask.process_response',
                'flask.do_teardown_request',
                'flask.do_teardown_appcontext',
            ],
            [s.name for s in spans],
        )

        # Assert span services
        for span in spans:
            self.assertEqual(span.service, 'flask')

        # Root request span
        req_span = spans[0]
        self.assertEqual(req_span.service, 'flask')
        self.assertEqual(req_span.name, 'flask.request')
        self.assertEqual(req_span.resource, 'GET /501')
        self.assertEqual(req_span.span_type, 'web')
        self.assertEqual(req_span.error, 1)
        self.assertIsNone(req_span.parent_id)

        # Request tags
        self.assertEqual(req_span.get_tag('http.method'), 'GET')
        self.assertEqual(req_span.get_tag(http.URL), 'http://localhost/501')
        assert_span_http_status_code(req_span, 501)
        self.assertEqual(req_span.get_tag('flask.endpoint'), 'fivehundredone')
        self.assertEqual(req_span.get_tag('flask.url_rule'), '/501')

        # Dispatch span
        dispatch_span = spans[3]
        self.assertEqual(dispatch_span.service, 'flask')
        self.assertEqual(dispatch_span.name, 'flask.dispatch_request')
        self.assertEqual(dispatch_span.resource, 'flask.dispatch_request')
        self.assertEqual(dispatch_span.error, 1)
        self.assertTrue(dispatch_span.get_tag('error.msg').startswith('501 Not Implemented'))
        self.assertTrue(dispatch_span.get_tag('error.stack').startswith('Traceback'))
        self.assertEqual(dispatch_span.get_tag('error.type'), 'werkzeug.exceptions.NotImplemented')

        # Handler span
        handler_span = spans[4]
        self.assertEqual(handler_span.service, 'flask')
        self.assertEqual(handler_span.name, 'tests.contrib.flask.test_request.fivehundredone')
        self.assertEqual(handler_span.resource, '/501')
        self.assertEqual(handler_span.error, 1)
        self.assertTrue(handler_span.get_tag('error.msg').startswith('501 Not Implemented'))
        self.assertTrue(handler_span.get_tag('error.stack').startswith('Traceback'))
        self.assertEqual(handler_span.get_tag('error.type'), 'werkzeug.exceptions.NotImplemented')

        # User exception span
        user_ex_span = spans[5]
        self.assertEqual(user_ex_span.service, 'flask')
        self.assertEqual(user_ex_span.name, 'flask.handle_user_exception')
        self.assertEqual(user_ex_span.resource, 'flask.handle_user_exception')
        self.assertEqual(user_ex_span.error, 0)

    def test_request_error_handler(self):
        """
        When making a request
            When the requested endpoint raises an exception
                We create the expected spans
        """
        @self.app.errorhandler(500)
        def error_handler(e):
            return 'Whoops', 500

        @self.app.route('/500')
        def fivehundred():
            raise Exception('500 error')

        res = self.client.get('/500')
        self.assertEqual(res.status_code, 500)
        self.assertEqual(res.data, b'Whoops')

        spans = self.get_spans()

        if flask_version >= (0, 12, 0):
            self.assertEqual(len(spans), 11)

            # Assert the order of the spans created
            self.assertListEqual(
                [
                    'flask.request',
                    'flask.try_trigger_before_first_request_functions',
                    'flask.preprocess_request',
                    'flask.dispatch_request',
                    'tests.contrib.flask.test_request.fivehundred',
                    'flask.handle_user_exception',
                    'flask.handle_exception',
                    'tests.contrib.flask.test_request.error_handler',
                    'flask.process_response',
                    'flask.do_teardown_request',
                    'flask.do_teardown_appcontext',
                ],
                [s.name for s in spans],
            )
        else:
            self.assertEqual(len(spans), 10)

            # Assert the order of the spans created
            self.assertListEqual(
                [
                    'flask.request',
                    'flask.try_trigger_before_first_request_functions',
                    'flask.preprocess_request',
                    'flask.dispatch_request',
                    'tests.contrib.flask.test_request.fivehundred',
                    'flask.handle_user_exception',
                    'flask.handle_exception',
                    'tests.contrib.flask.test_request.error_handler',
                    'flask.do_teardown_request',
                    'flask.do_teardown_appcontext',
                ],
                [s.name for s in spans],
            )
"'flask.preprocess_request', 'flask.dispatch_request', 'flask.handle_user_exception', 'flask.handle_http_exception', 'flask.process_response', 'flask.do_teardown_request', 'flask.do_teardown_appcontext', ], [s.name for", "self.assertEqual(res.status_code, 404) spans = self.get_spans() self.assertEqual(len(spans), 10) # Assert the", "test_request_abort_404(self): \"\"\" When making a request When the requested endpoint", "@self.app.route('/501') def fivehundredone(): abort(501) res = self.client.get('/501') self.assertEqual(res.status_code, 501) spans", "return 'Hello Flask', 200 with self.override_global_config(dict(analytics_enabled=True)): res = self.client.get('/') self.assertEqual(res.status_code,", "def index(): return 'Hello Flask', 200 res = self.client.get('/') self.assertEqual(res.status_code,", "req_span.meta # Handler span handler_span = spans[4] self.assertEqual(handler_span.service, 'flask') self.assertEqual(handler_span.name,", "self.assertListEqual( [ 'flask.request', 'flask.try_trigger_before_first_request_functions', 'flask.preprocess_request', 'flask.dispatch_request', 'flask.handle_user_exception', 'flask.handle_http_exception', 'flask.process_response', 'flask.do_teardown_request',", "dict(distributed_tracing_enabled=False)): res = self.client.get('/', headers={ HTTP_HEADER_PARENT_ID: '12345', HTTP_HEADER_TRACE_ID: '678910', })", "no query string self.assertEqual(req_span.get_tag(http.URL), 'http://localhost/') assert_span_http_status_code(req_span, 200) # Handler span", "no query string self.assertEqual(handler_span.resource, '/') self.assertEqual(req_span.error, 0) def test_request_unicode(self): \"\"\"", "flask_version >= (0, 12, 0): self.assertEqual(len(spans), 11) # Assert the", "tags self.assertEqual(req_span.get_tag('flask.endpoint'), 'unicode') self.assertEqual(req_span.get_tag('flask.url_rule'), u'/üŋïĉóđē') self.assertEqual(req_span.get_tag('http.method'), 'GET') self.assertEqual(req_span.get_tag(http.URL), u'http://localhost/üŋïĉóđē') assert_span_http_status_code(req_span,", "the root span to have the appropriate tag \"\"\" @self.app.route('/')", "= self.client.get('/', query_string=dict(hello='flask')) self.assertEqual(res.status_code, 200) self.assertEqual(res.data, b'Hello Flask') spans =", "spans[0] self.assertEqual(req_span.service, 'flask') self.assertEqual(req_span.name, 'flask.request') self.assertEqual(req_span.resource, 'GET /') self.assertEqual(req_span.span_type, 'web')", "Note: contains no query string self.assertEqual(req_span.get_tag('flask.url_rule'), '/') self.assertEqual(req_span.get_tag('http.method'), 'GET') #", "When the requested endpoint was not found We create the", "@self.app.errorhandler(500) def error_handler(e): return 'Whoops', 500 @self.app.route('/500') def fivehundred(): raise", "import HTTP_HEADER_TRACE_ID, HTTP_HEADER_PARENT_ID from flask import abort from . 
import", "'/501') self.assertEqual(handler_span.error, 1) self.assertTrue(handler_span.get_tag('error.msg').startswith('501 Not Implemented')) self.assertTrue(handler_span.get_tag('error.stack').startswith('Traceback')) self.assertEqual(handler_span.get_tag('error.type'), 'werkzeug.exceptions.NotImplemented') #", "We create the expected spans \"\"\" @self.app.errorhandler(500) def error_handler(e): return", "self.assertEqual(dispatch_span.error, 1) self.assertTrue(dispatch_span.get_tag('error.msg').startswith('501 Not Implemented')) self.assertTrue(dispatch_span.get_tag('error.stack').startswith('Traceback')) self.assertEqual(dispatch_span.get_tag('error.type'), 'werkzeug.exceptions.NotImplemented') # Handler", "spans \"\"\" @self.app.route('/') def index(): return 'Hello Flask', 200 #", "'678910', }) self.assertEqual(res.status_code, 200) self.assertEqual(res.data, b'Hello Flask') # Assert parent", "index(): return 'Hello Flask', 200 with self.override_global_config(dict(analytics_enabled=False)): res = self.client.get('/')", "req_span = spans[0] self.assertEqual(req_span.service, 'flask') self.assertEqual(req_span.name, 'flask.request') self.assertEqual(req_span.resource, u'GET /üŋïĉóđē')", "distributed tracing disabled with self.override_config('flask', dict(distributed_tracing_enabled=False)): res = self.client.get('/', headers={", "'http://localhost/') assert_span_http_status_code(req_span, 200) assert http.QUERY_STRING not in req_span.meta # Handler", "abort from . import BaseFlaskTestCase from ...utils import assert_span_http_status_code base_exception_name", "def test_analytics_global_off_integration_on(self): \"\"\" When making a request When an integration", "self.client.get('/not-found') self.assertEqual(res.status_code, 404) spans = self.get_spans() self.assertEqual(len(spans), 10) # Assert", "self.assertEqual(handler_span.get_tag('error.type'), 'werkzeug.exceptions.NotImplemented') # User exception span user_ex_span = spans[5] self.assertEqual(user_ex_span.service,", "= self.client.get('/') self.assertEqual(res.status_code, 200) self.assertEqual(res.data, b'Hello Flask') spans = self.get_spans()", "'flask.preprocess_request', 'flask.dispatch_request', 'tests.contrib.flask.test_request.not_found', 'flask.handle_user_exception', 'flask.handle_http_exception', 'flask.process_response', 'flask.do_teardown_request', 'flask.do_teardown_appcontext', ], [s.name", "that we create the expected spans and capture the query", "the expected spans and capture the query string.\"\"\" @self.app.route('/') def", "Flask', 200 with self.override_global_config(dict(analytics_enabled=False)): res = self.client.get('/') self.assertEqual(res.status_code, 200) self.assertEqual(res.data,", "12345) # With distributed tracing disabled with self.override_config('flask', dict(distributed_tracing_enabled=False)): res", "'tests.contrib.flask.test_request.error_handler', 'flask.do_teardown_request', 'flask.do_teardown_appcontext', ], [s.name for s in spans], )", "if span == root: continue self.assertIsNone(span.get_metric(ANALYTICS_SAMPLE_RATE_KEY)) def test_analytics_global_off_integration_default(self): \"\"\" When", "'Hello Flask', 200 with self.override_http_config('flask', dict(trace_query_string=True)): self.client.get('/?foo=bar&baz=biz') spans = self.get_spans()", "request When an integration trace search is enabled and sample", "res = self.client.get('/', headers={ HTTP_HEADER_PARENT_ID: '12345', HTTP_HEADER_TRACE_ID: '678910', }) self.assertEqual(res.status_code,", "self.assertEqual(res.data, 
b'Hello Flask') # Assert parent and trace id are", "headers={ HTTP_HEADER_PARENT_ID: '12345', HTTP_HEADER_TRACE_ID: '678910', }) self.assertEqual(res.status_code, 200) self.assertEqual(res.data, b'Hello", "self.client.get('/', query_string=dict(hello='flask')) self.assertEqual(res.status_code, 200) self.assertEqual(res.data, b'Hello Flask') spans = self.get_spans()", "self.assertEqual(dispatch_span.get_tag('error.type'), 'werkzeug.exceptions.NotFound') # Handler span handler_span = spans[4] self.assertEqual(handler_span.service, 'flask')", "1) self.assertTrue(dispatch_span.get_tag('error.msg').startswith('500 error')) self.assertTrue(dispatch_span.get_tag('error.stack').startswith('Traceback')) self.assertEqual(dispatch_span.get_tag('error.type'), base_exception_name) # Handler span handler_span", "exception span user_ex_span = spans[5] self.assertEqual(user_ex_span.service, 'flask') self.assertEqual(user_ex_span.name, 'flask.handle_user_exception') self.assertEqual(user_ex_span.resource,", "self.assertEqual(res.status_code, 500) self.assertEqual(res.data, b'Whoops') spans = self.get_spans() if flask_version >=", "# Assert span services for span in spans: self.assertEqual(span.service, 'flask')", "self.assertEqual(req_span.error, 1) self.assertIsNone(req_span.parent_id) # Request tags self.assertEqual(req_span.get_tag('http.method'), 'GET') self.assertEqual(req_span.get_tag(http.URL), 'http://localhost/500')", "and trace id are properly set on the root span", "# Request tags self.assertEqual(req_span.get_tag('http.method'), 'GET') self.assertEqual(req_span.get_tag(http.URL), 'http://localhost/not-found') assert_span_http_status_code(req_span, 404) #", "Flask', 200 with self.override_global_config(dict(analytics_enabled=False)): with self.override_config('flask', dict(analytics_enabled=True, analytics_sample_rate=0.5)): res =", "'/not-found') self.assertEqual(handler_span.error, 1) self.assertTrue(handler_span.get_tag('error.msg').startswith('404 Not Found')) self.assertTrue(handler_span.get_tag('error.stack').startswith('Traceback')) self.assertEqual(handler_span.get_tag('error.type'), 'werkzeug.exceptions.NotFound') def", "string self.assertEqual(handler_span.resource, '/') self.assertEqual(req_span.error, 0) def test_request_unicode(self): \"\"\" When making", "self.assertTrue(dispatch_span.get_tag('error.msg').startswith('404 Not Found')) self.assertTrue(dispatch_span.get_tag('error.stack').startswith('Traceback')) self.assertEqual(dispatch_span.get_tag('error.type'), 'werkzeug.exceptions.NotFound') def test_request_abort_404(self): \"\"\" When", "integration trace search is not event sample rate is not", "self.assertTrue(handler_span.get_tag('error.stack').startswith('Traceback')) self.assertEqual(handler_span.get_tag('error.type'), 'werkzeug.exceptions.NotImplemented') # User exception span user_ex_span = spans[5]", "return 'Hello Flask', 200 with self.override_global_config(dict(analytics_enabled=False)): res = self.client.get('/') self.assertEqual(res.status_code,", "'flask.handle_user_exception', 'flask.handle_exception', 'tests.contrib.flask.test_request.error_handler', 'flask.do_teardown_request', 'flask.do_teardown_appcontext', ], [s.name for s in", "root: continue self.assertIsNone(span.get_metric(ANALYTICS_SAMPLE_RATE_KEY)) def test_distributed_tracing(self): \"\"\" When making a request", "tracing disabled with self.override_config('flask', dict(distributed_tracing_enabled=False)): res = self.client.get('/', headers={ HTTP_HEADER_PARENT_ID:", 
"b'\\xc3\\xbc\\xc5\\x8b\\xc3\\xaf\\xc4\\x89\\xc3\\xb3\\xc4\\x91\\xc4\\x93') spans = self.get_spans() self.assertEqual(len(spans), 8) # Assert the order", "with self.override_config('flask', dict(analytics_enabled=True, analytics_sample_rate=0.5)): res = self.client.get('/') self.assertEqual(res.status_code, 200) self.assertEqual(res.data,", "\"\"\" @self.app.route('/') def index(): return 'Hello Flask', 200 with self.override_global_config(dict(analytics_enabled=False)):", "test_analytics_global_on_integration_on(self): \"\"\" When making a request When an integration trace", "self.assertEqual(user_ex_span.resource, 'flask.handle_user_exception') self.assertEqual(user_ex_span.error, 1) self.assertTrue(user_ex_span.get_tag('error.msg').startswith('500 error')) self.assertTrue(user_ex_span.get_tag('error.stack').startswith('Traceback')) self.assertEqual(user_ex_span.get_tag('error.type'), base_exception_name) def", "raise Exception('500 error') res = self.client.get('/500') self.assertEqual(res.status_code, 500) self.assertEqual(res.data, b'Whoops')", "Not Found')) self.assertTrue(dispatch_span.get_tag('error.stack').startswith('Traceback')) self.assertEqual(dispatch_span.get_tag('error.type'), 'werkzeug.exceptions.NotFound') # Handler span handler_span =", "404') self.assertEqual(req_span.span_type, 'web') self.assertEqual(req_span.error, 0) self.assertIsNone(req_span.parent_id) # Request tags self.assertEqual(req_span.get_tag('http.method'),", "'not_found') self.assertEqual(req_span.get_tag('flask.url_rule'), '/not-found') # Dispatch span dispatch_span = spans[3] self.assertEqual(dispatch_span.service,", "when making a request that we create the expected spans", "self.assertEqual(dispatch_span.error, 1) self.assertTrue(dispatch_span.get_tag('error.msg').startswith('404 Not Found')) self.assertTrue(dispatch_span.get_tag('error.stack').startswith('Traceback')) self.assertEqual(dispatch_span.get_tag('error.type'), 'werkzeug.exceptions.NotFound') # Handler", "the order of the spans created self.assertListEqual( [ 'flask.request', 'flask.try_trigger_before_first_request_functions',", "span user_ex_span = spans[5] self.assertEqual(user_ex_span.service, 'flask') self.assertEqual(user_ex_span.name, 'flask.handle_user_exception') self.assertEqual(user_ex_span.resource, 'flask.handle_user_exception')", "When making a request When the requested endpoint was not", "s in spans], ) else: self.assertEqual(len(spans), 10) # Assert the", "self.assertIsNone(req_span.parent_id) # Request tags self.assertEqual(req_span.get_tag('http.method'), 'GET') self.assertEqual(req_span.get_tag(http.URL), 'http://localhost/501') assert_span_http_status_code(req_span, 501)", "base_exception_name = 'builtins.Exception' if PY2: base_exception_name = 'exceptions.Exception' class FlaskRequestTestCase(BaseFlaskTestCase):", "utf-8 -*- from ddtrace.compat import PY2 from ddtrace.constants import ANALYTICS_SAMPLE_RATE_KEY", "200 with self.override_global_config(dict(analytics_enabled=True)): with self.override_config('flask', dict(analytics_enabled=True, analytics_sample_rate=0.5)): res = self.client.get('/')", "'flask.request') self.assertEqual(span.trace_id, 678910) self.assertEqual(span.parent_id, 12345) # With distributed tracing disabled", "self.assertTrue(dispatch_span.get_tag('error.stack').startswith('Traceback')) self.assertEqual(dispatch_span.get_tag('error.type'), 'werkzeug.exceptions.NotFound') def test_request_abort_404(self): \"\"\" When making a request", "# Request tags 
self.assertEqual(req_span.get_tag('http.method'), 'GET') self.assertEqual(req_span.get_tag(http.URL), 'http://localhost/501') assert_span_http_status_code(req_span, 501) self.assertEqual(req_span.get_tag('flask.endpoint'),", "import assert_span_http_status_code base_exception_name = 'builtins.Exception' if PY2: base_exception_name = 'exceptions.Exception'", "# Explicitly enable distributed tracing with self.override_config('flask', dict(distributed_tracing_enabled=True)): res =", "globally trace search is disabled We expect the root span", "span handler_span = spans[4] self.assertEqual(handler_span.service, 'flask') self.assertEqual(handler_span.name, 'tests.contrib.flask.test_request.not_found') self.assertEqual(handler_span.resource, '/not-found')", "self.assertIsNone(span.get_metric(ANALYTICS_SAMPLE_RATE_KEY)) def test_analytics_global_off_integration_default(self): \"\"\" When making a request When an", "self.assertEqual(res.data, b'Hello Flask') root = self.get_root_span() self.assertIsNone(root.get_metric(ANALYTICS_SAMPLE_RATE_KEY)) for span in", "Request tags self.assertEqual(req_span.get_tag('flask.endpoint'), 'index') # Note: contains no query string", "'http://localhost/not-found') assert_span_http_status_code(req_span, 404) # Dispatch span dispatch_span = spans[3] self.assertEqual(dispatch_span.service,", "'fivehundred') self.assertEqual(req_span.get_tag('flask.url_rule'), '/500') # Dispatch span dispatch_span = spans[3] self.assertEqual(dispatch_span.service,", "/501') self.assertEqual(req_span.span_type, 'web') self.assertEqual(req_span.error, 1) self.assertIsNone(req_span.parent_id) # Request tags self.assertEqual(req_span.get_tag('http.method'),", "self.assertEqual(req_span.get_tag('flask.endpoint'), 'not_found') self.assertEqual(req_span.get_tag('flask.url_rule'), '/not-found') # Dispatch span dispatch_span = spans[3]", "self.client.get('/500') self.assertEqual(res.status_code, 500) self.assertEqual(res.data, b'Whoops') spans = self.get_spans() if flask_version", "'flask.try_trigger_before_first_request_functions', 'flask.preprocess_request', 'flask.dispatch_request', 'tests.contrib.flask.test_request.unicode', 'flask.process_response', 'flask.do_teardown_request', 'flask.do_teardown_appcontext', ], [s.name for", "in spans: self.assertEqual(span.service, 'flask') # Root request span req_span =", "the expected spans \"\"\" @self.app.route('/501') def fivehundredone(): abort(501) res =", "0) self.assertIsNone(req_span.parent_id) # Request tags self.assertEqual(req_span.get_tag('flask.endpoint'), 'index') self.assertEqual(req_span.get_tag('flask.url_rule'), '/') self.assertEqual(req_span.get_tag('http.method'),", "= self.client.get('/', headers={ HTTP_HEADER_PARENT_ID: '12345', HTTP_HEADER_TRACE_ID: '678910', }) self.assertEqual(res.status_code, 200)", "request When the requested endpoint calls `abort(501)` We create the", "def test_analytics_global_on_integration_default(self): \"\"\" When making a request When an integration", "When the requested endpoint calls `abort(404)` We create the expected", "sample rate is set and globally trace search is enabled", "self.override_config('flask', dict(analytics_enabled=True, analytics_sample_rate=0.5)): res = self.client.get('/') self.assertEqual(res.status_code, 200) self.assertEqual(res.data, b'Hello", "search is not event sample rate is not set and", "200 res = self.client.get(u'/üŋïĉóđē') self.assertEqual(res.status_code, 200) self.assertEqual(res.data, b'\\xc3\\xbc\\xc5\\x8b\\xc3\\xaf\\xc4\\x89\\xc3\\xb3\\xc4\\x91\\xc4\\x93') 
spans =", "root span span = self.find_span_by_name(self.get_spans(), 'flask.request') self.assertNotEqual(span.trace_id, 678910) self.assertIsNone(span.parent_id) def", "the expected spans \"\"\" @self.app.route('/') def index(): return 'Hello Flask',", "'GET /') self.assertEqual(req_span.span_type, 'web') self.assertEqual(req_span.error, 0) self.assertIsNone(req_span.parent_id) # Request tags", "def test_request(self): \"\"\" When making a request We create the", "the root span span = self.find_span_by_name(self.get_spans(), 'flask.request') self.assertEqual(span.trace_id, 678910) self.assertEqual(span.parent_id,", "span handler_span = spans[4] self.assertEqual(handler_span.service, 'flask') self.assertEqual(handler_span.name, 'tests.contrib.flask.test_request.index') # Note:", "= spans[0] self.assertEqual(req_span.service, 'flask') self.assertEqual(req_span.name, 'flask.request') self.assertEqual(req_span.resource, 'GET /') self.assertEqual(req_span.span_type,", "req_span = spans[0] self.assertEqual(req_span.service, 'flask') self.assertEqual(req_span.name, 'flask.request') # Note: contains", "self.assertEqual(handler_span.get_tag('error.type'), base_exception_name) # User exception span user_ex_span = spans[5] self.assertEqual(user_ex_span.service,", "= self.find_span_by_name(self.get_spans(), 'flask.request') self.assertEqual(span.trace_id, 678910) self.assertEqual(span.parent_id, 12345) # With distributed", "'flask.process_response', 'flask.do_teardown_request', 'flask.do_teardown_appcontext', ], [s.name for s in spans], )", "When an integration trace search is not event sample rate", "@self.app.route('/') def index(): return 'Hello Flask', 200 with self.override_global_config(dict(analytics_enabled=True)): with", "a request When distributed tracing headers are present We create", "self.assertEqual(handler_span.name, 'tests.contrib.flask.test_request.unicode') self.assertEqual(handler_span.resource, u'/üŋïĉóđē') self.assertEqual(req_span.error, 0) def test_request_404(self): \"\"\" When", "tags assert spans[0].get_tag(http.QUERY_STRING) == 'foo=bar&baz=biz' def test_analytics_global_on_integration_default(self): \"\"\" When making", "'flask') self.assertEqual(req_span.name, 'flask.request') self.assertEqual(req_span.resource, 'GET 404') self.assertEqual(req_span.span_type, 'web') self.assertEqual(req_span.error, 0)", "self.override_global_config(dict(analytics_enabled=True)): res = self.client.get('/') self.assertEqual(res.status_code, 200) self.assertEqual(res.data, b'Hello Flask') root", "self.assertEqual(req_span.service, 'flask') self.assertEqual(req_span.name, 'flask.request') self.assertEqual(req_span.resource, 'GET /') self.assertEqual(req_span.span_type, 'web') self.assertEqual(req_span.error,", "1) self.assertIsNone(req_span.parent_id) # Request tags self.assertEqual(req_span.get_tag('http.method'), 'GET') self.assertEqual(req_span.get_tag(http.URL), 'http://localhost/501') assert_span_http_status_code(req_span,", "'flask') self.assertEqual(handler_span.name, 'tests.contrib.flask.test_request.fivehundredone') self.assertEqual(handler_span.resource, '/501') self.assertEqual(handler_span.error, 1) self.assertTrue(handler_span.get_tag('error.msg').startswith('501 Not Implemented'))", "spans = self.get_spans() self.assertEqual(len(spans), 9) # Assert the order of", "req_span = spans[0] self.assertEqual(req_span.service, 'flask') self.assertEqual(req_span.name, 'flask.request') self.assertEqual(req_span.resource, 'GET /not-found')", "def not_found(): abort(404) res = 
self.client.get('/not-found') self.assertEqual(res.status_code, 404) spans =", "error_handler(e): return 'Whoops', 500 @self.app.route('/500') def fivehundred(): raise Exception('500 error')", "When the request contains a query string We create the", ">= (0, 12, 0): self.assertEqual(len(spans), 11) # Assert the order", "`abort(404)` We create the expected spans \"\"\" @self.app.route('/not-found') def not_found():", "index(): return 'Hello Flask', 200 with self.override_global_config(dict(analytics_enabled=True)): res = self.client.get('/')", "# Dispatch span dispatch_span = spans[3] self.assertEqual(dispatch_span.service, 'flask') self.assertEqual(dispatch_span.name, 'flask.dispatch_request')", "}, ) for span in self.spans: if span == root:", "to not include tag \"\"\" @self.app.route('/') def index(): return 'Hello", "# Handler span handler_span = spans[4] self.assertEqual(handler_span.service, 'flask') self.assertEqual(handler_span.name, 'tests.contrib.flask.test_request.fivehundredone')", "self.assertEqual(dispatch_span.get_tag('error.type'), 'werkzeug.exceptions.NotImplemented') # Handler span handler_span = spans[4] self.assertEqual(handler_span.service, 'flask')", ") # Assert span services for span in spans: self.assertEqual(span.service,", "self.assertEqual(handler_span.get_tag('error.type'), 'werkzeug.exceptions.NotFound') def test_request_500(self): \"\"\" When making a request When", "'/501') # Dispatch span dispatch_span = spans[3] self.assertEqual(dispatch_span.service, 'flask') self.assertEqual(dispatch_span.name,", "404) spans = self.get_spans() self.assertEqual(len(spans), 9) # Assert the order", "return 'Hello Flask', 200 with self.override_global_config(dict(analytics_enabled=True)): with self.override_config('flask', dict(analytics_enabled=True, analytics_sample_rate=0.5)):", "[ 'flask.request', 'flask.try_trigger_before_first_request_functions', 'flask.preprocess_request', 'flask.dispatch_request', 'tests.contrib.flask.test_request.not_found', 'flask.handle_user_exception', 'flask.handle_http_exception', 'flask.process_response', 'flask.do_teardown_request',", "1) self.assertTrue(handler_span.get_tag('error.msg').startswith('500 error')) self.assertTrue(handler_span.get_tag('error.stack').startswith('Traceback')) self.assertEqual(handler_span.get_tag('error.type'), base_exception_name) # User exception span", "self.assertEqual(handler_span.resource, '/not-found') self.assertEqual(handler_span.error, 1) self.assertTrue(handler_span.get_tag('error.msg').startswith('404 Not Found')) self.assertTrue(handler_span.get_tag('error.stack').startswith('Traceback')) self.assertEqual(handler_span.get_tag('error.type'), 'werkzeug.exceptions.NotFound')", "self.assertEqual(req_span.service, 'flask') self.assertEqual(req_span.name, 'flask.request') self.assertEqual(req_span.resource, 'GET /not-found') self.assertEqual(req_span.span_type, 'web') self.assertEqual(req_span.error,", "== root: continue self.assertIsNone(span.get_metric(ANALYTICS_SAMPLE_RATE_KEY)) def test_analytics_global_off_integration_on(self): \"\"\" When making a", "self.assertEqual(handler_span.error, 1) self.assertTrue(handler_span.get_tag('error.msg').startswith('404 Not Found')) self.assertTrue(handler_span.get_tag('error.stack').startswith('Traceback')) self.assertEqual(handler_span.get_tag('error.type'), 'werkzeug.exceptions.NotFound') def test_request_500(self):", "self.spans: if span == root: continue self.assertIsNone(span.get_metric(ANALYTICS_SAMPLE_RATE_KEY)) def test_distributed_tracing(self): \"\"\"", 
"'http://localhost/500') assert_span_http_status_code(req_span, 500) self.assertEqual(req_span.get_tag('flask.endpoint'), 'fivehundred') self.assertEqual(req_span.get_tag('flask.url_rule'), '/500') # Dispatch span", "Handler span handler_span = spans[4] self.assertEqual(handler_span.service, 'flask') self.assertEqual(handler_span.name, 'tests.contrib.flask.test_request.fivehundred') self.assertEqual(handler_span.resource,", "base_exception_name) # User exception span user_ex_span = spans[5] self.assertEqual(user_ex_span.service, 'flask')", "0) def test_request_404(self): \"\"\" When making a request When the", "b'Hello Flask') root = self.get_root_span() root.assert_matches( name='flask.request', metrics={ ANALYTICS_SAMPLE_RATE_KEY: 1.0,", "'tests.contrib.flask.test_request.fivehundred', 'flask.handle_user_exception', 'flask.handle_exception', 'tests.contrib.flask.test_request.error_handler', 'flask.process_response', 'flask.do_teardown_request', 'flask.do_teardown_appcontext', ], [s.name for", "error')) self.assertTrue(handler_span.get_tag('error.stack').startswith('Traceback')) self.assertEqual(handler_span.get_tag('error.type'), base_exception_name) # User exception span user_ex_span =", "distributed tracing enabled res = self.client.get('/', headers={ HTTP_HEADER_PARENT_ID: '12345', HTTP_HEADER_TRACE_ID:", "self.override_global_config(dict(analytics_enabled=False)): res = self.client.get('/') self.assertEqual(res.status_code, 200) self.assertEqual(res.data, b'Hello Flask') root", "self.assertEqual(handler_span.service, 'flask') self.assertEqual(handler_span.name, 'tests.contrib.flask.test_request.fivehundredone') self.assertEqual(handler_span.resource, '/501') self.assertEqual(handler_span.error, 1) self.assertTrue(handler_span.get_tag('error.msg').startswith('501 Not", "a request When the requested endpoint raises an exception We", "test_analytics_global_off_integration_on(self): \"\"\" When making a request When an integration trace", "spans created self.assertListEqual( [ 'flask.request', 'flask.try_trigger_before_first_request_functions', 'flask.preprocess_request', 'flask.dispatch_request', 'flask.handle_user_exception', 'flask.handle_http_exception',", "'flask') self.assertEqual(req_span.name, 'flask.request') self.assertEqual(req_span.resource, 'GET /500') self.assertEqual(req_span.span_type, 'web') self.assertEqual(req_span.error, 1)", "Handler span handler_span = spans[4] self.assertEqual(handler_span.service, 'flask') self.assertEqual(handler_span.name, 'tests.contrib.flask.test_request.not_found') self.assertEqual(handler_span.resource,", "'tests.contrib.flask.test_request.fivehundredone', 'flask.handle_user_exception', 'flask.handle_http_exception', 'flask.process_response', 'flask.do_teardown_request', 'flask.do_teardown_appcontext', ], [s.name for s", "Flask') # Assert parent and trace id are properly set", "spans \"\"\" @self.app.route(u'/üŋïĉóđē') def unicode(): return 'üŋïĉóđē', 200 res =", "sure when making a request that we create the expected", "spans and capture the query string.\"\"\" @self.app.route('/') def index(): return", "We create the expected spans \"\"\" @self.app.route('/500') def fivehundred(): raise", "not include tag \"\"\" @self.app.route('/') def index(): return 'Hello Flask',", "1) self.assertTrue(dispatch_span.get_tag('error.msg').startswith('404 Not Found')) self.assertTrue(dispatch_span.get_tag('error.stack').startswith('Traceback')) self.assertEqual(dispatch_span.get_tag('error.type'), 'werkzeug.exceptions.NotFound') def 
test_request_abort_404(self): \"\"\"", "# With distributed tracing disabled with self.override_config('flask', dict(distributed_tracing_enabled=False)): res =", "Root request span req_span = spans[0] self.assertEqual(req_span.service, 'flask') self.assertEqual(req_span.name, 'flask.request')", "the root span span = self.find_span_by_name(self.get_spans(), 'flask.request') self.assertNotEqual(span.trace_id, 678910) self.assertIsNone(span.parent_id)", "When making a request When the requested endpoint calls `abort(501)`", "self.assertEqual(len(spans), 10) # Assert the order of the spans created", "= spans[0] self.assertEqual(req_span.service, 'flask') self.assertEqual(req_span.name, 'flask.request') self.assertEqual(req_span.resource, u'GET /üŋïĉóđē') self.assertEqual(req_span.span_type,", "b'Hello Flask') spans = self.get_spans() self.assertEqual(len(spans), 8) # Assert the", "self.assertEqual(len(spans), 9) # Assert the order of the spans created", "tag \"\"\" @self.app.route('/') def index(): return 'Hello Flask', 200 with", "req_span = spans[0] self.assertEqual(req_span.service, 'flask') self.assertEqual(req_span.name, 'flask.request') self.assertEqual(req_span.resource, 'GET /')", "When the requested endpoint raises an exception We create the", "self.assertEqual(res.data, b'Whoops') spans = self.get_spans() if flask_version >= (0, 12,", "Flask', 200 with self.override_http_config('flask', dict(trace_query_string=True)): self.client.get('/?foo=bar&baz=biz') spans = self.get_spans() #", "else: self.assertEqual(len(spans), 10) # Assert the order of the spans", "200 with self.override_global_config(dict(analytics_enabled=False)): res = self.client.get('/') self.assertEqual(res.status_code, 200) self.assertEqual(res.data, b'Hello", "is enabled We expect the root span to have the", "= 'exceptions.Exception' class FlaskRequestTestCase(BaseFlaskTestCase): def test_request(self): \"\"\" When making a", "the spans created self.assertListEqual( [ 'flask.request', 'flask.try_trigger_before_first_request_functions', 'flask.preprocess_request', 'flask.dispatch_request', 'flask.handle_user_exception',", "http.QUERY_STRING not in req_span.meta # Handler span handler_span = spans[4]", "self.client.get('/') self.assertEqual(res.status_code, 200) self.assertEqual(res.data, b'Hello Flask') spans = self.get_spans() self.assertEqual(len(spans),", "0.5, }, ) for span in self.spans: if span ==", "assert_span_http_status_code(req_span, 404) # Dispatch span dispatch_span = spans[3] self.assertEqual(dispatch_span.service, 'flask')", "'flask') self.assertEqual(handler_span.name, 'tests.contrib.flask.test_request.not_found') self.assertEqual(handler_span.resource, '/not-found') self.assertEqual(handler_span.error, 1) self.assertTrue(handler_span.get_tag('error.msg').startswith('404 Not Found'))", "distributed tracing headers are present We create the expected spans", "spans[3] self.assertEqual(dispatch_span.service, 'flask') self.assertEqual(dispatch_span.name, 'flask.dispatch_request') self.assertEqual(dispatch_span.resource, 'flask.dispatch_request') self.assertEqual(dispatch_span.error, 1) self.assertTrue(dispatch_span.get_tag('error.msg').startswith('501", "found We create the expected spans \"\"\" res = self.client.get('/not-found')", "self.assertEqual(res.status_code, 404) spans = self.get_spans() self.assertEqual(len(spans), 9) # Assert the", "created self.assertListEqual( [ 'flask.request', 'flask.try_trigger_before_first_request_functions', 'flask.preprocess_request', 'flask.dispatch_request', 
'tests.contrib.flask.test_request.fivehundred', 'flask.handle_user_exception', 'flask.handle_exception',", "spans[0] self.assertEqual(req_span.service, 'flask') self.assertEqual(req_span.name, 'flask.request') self.assertEqual(req_span.resource, 'GET /500') self.assertEqual(req_span.span_type, 'web')", "When distributed tracing headers are present We create the expected", "Default: distributed tracing enabled res = self.client.get('/', headers={ HTTP_HEADER_PARENT_ID: '12345',", "spans created self.assertListEqual( [ 'flask.request', 'flask.try_trigger_before_first_request_functions', 'flask.preprocess_request', 'flask.dispatch_request', 'tests.contrib.flask.test_request.unicode', 'flask.process_response',", "], [s.name for s in spans], ) # Assert span", "def index(): return 'Hello Flask', 200 with self.override_http_config('flask', dict(trace_query_string=True)): self.client.get('/?foo=bar&baz=biz')", "self.override_http_config('flask', dict(trace_query_string=True)): self.client.get('/?foo=bar&baz=biz') spans = self.get_spans() # Request tags assert", "self.assertTrue(dispatch_span.get_tag('error.msg').startswith('404 Not Found')) self.assertTrue(dispatch_span.get_tag('error.stack').startswith('Traceback')) self.assertEqual(dispatch_span.get_tag('error.type'), 'werkzeug.exceptions.NotFound') # Handler span handler_span", "raises an exception We create the expected spans \"\"\" @self.app.route('/500')", "requested endpoint raises an exception We create the expected spans", "self.assertEqual(req_span.resource, 'GET /500') self.assertEqual(req_span.span_type, 'web') self.assertEqual(req_span.error, 1) self.assertIsNone(req_span.parent_id) # Request", "'Hello Flask', 200 res = self.client.get('/') self.assertEqual(res.status_code, 200) self.assertEqual(res.data, b'Hello", "self.assertEqual(req_span.service, 'flask') self.assertEqual(req_span.name, 'flask.request') self.assertEqual(req_span.resource, 'GET /500') self.assertEqual(req_span.span_type, 'web') self.assertEqual(req_span.error,", "span services for span in spans: self.assertEqual(span.service, 'flask') # Root", "def test_analytics_global_on_integration_on(self): \"\"\" When making a request When an integration", "res = self.client.get('/not-found') self.assertEqual(res.status_code, 404) spans = self.get_spans() self.assertEqual(len(spans), 10)", "request When the requested endpoint was not found We create", "'flask.dispatch_request') self.assertEqual(dispatch_span.error, 1) self.assertTrue(dispatch_span.get_tag('error.msg').startswith('404 Not Found')) self.assertTrue(dispatch_span.get_tag('error.stack').startswith('Traceback')) self.assertEqual(dispatch_span.get_tag('error.type'), 'werkzeug.exceptions.NotFound') def", "= spans[4] self.assertEqual(handler_span.service, 'flask') self.assertEqual(handler_span.name, 'tests.contrib.flask.test_request.index') self.assertEqual(handler_span.resource, '/') self.assertEqual(req_span.error, 0)", "Flask') root = self.get_root_span() root.assert_matches( name='flask.request', metrics={ ANALYTICS_SAMPLE_RATE_KEY: 0.5, },", "contains no query string self.assertEqual(req_span.get_tag('flask.url_rule'), '/') self.assertEqual(req_span.get_tag('http.method'), 'GET') # Note:", "# Request tags self.assertEqual(req_span.get_tag('http.method'), 'GET') self.assertEqual(req_span.get_tag(http.URL), 'http://localhost/not-found') assert_span_http_status_code(req_span, 404) self.assertEqual(req_span.get_tag('flask.endpoint'),", "'flask.request', 'flask.try_trigger_before_first_request_functions', 
'flask.preprocess_request', 'flask.dispatch_request', 'tests.contrib.flask.test_request.fivehundredone', 'flask.handle_user_exception', 'flask.handle_http_exception', 'flask.process_response', 'flask.do_teardown_request', 'flask.do_teardown_appcontext',", "500) spans = self.get_spans() self.assertEqual(len(spans), 9) # Assert the order", "'flask') self.assertEqual(req_span.name, 'flask.request') # Note: contains no query string self.assertEqual(req_span.resource,", "'index') self.assertEqual(req_span.get_tag('flask.url_rule'), '/') self.assertEqual(req_span.get_tag('http.method'), 'GET') self.assertEqual(req_span.get_tag(http.URL), 'http://localhost/') assert_span_http_status_code(req_span, 200) assert", "'GET /500') self.assertEqual(req_span.span_type, 'web') self.assertEqual(req_span.error, 1) self.assertIsNone(req_span.parent_id) # Request tags", "'flask.request') self.assertEqual(req_span.resource, 'GET /500') self.assertEqual(req_span.span_type, 'web') self.assertEqual(req_span.error, 1) self.assertIsNone(req_span.parent_id) #", "flask_version from ddtrace.ext import http from ddtrace.propagation.http import HTTP_HEADER_TRACE_ID, HTTP_HEADER_PARENT_ID", "tags self.assertEqual(req_span.get_tag('http.method'), 'GET') self.assertEqual(req_span.get_tag(http.URL), 'http://localhost/501') assert_span_http_status_code(req_span, 501) self.assertEqual(req_span.get_tag('flask.endpoint'), 'fivehundredone') self.assertEqual(req_span.get_tag('flask.url_rule'),", "test_request_500(self): \"\"\" When making a request When the requested endpoint", "'GET /not-found') self.assertEqual(req_span.span_type, 'web') self.assertEqual(req_span.error, 0) self.assertIsNone(req_span.parent_id) # Request tags", "not set and sample rate is set and globally trace", "self.client.get('/', headers={ HTTP_HEADER_PARENT_ID: '12345', HTTP_HEADER_TRACE_ID: '678910', }) self.assertEqual(res.status_code, 200) self.assertEqual(res.data,", "[ 'flask.request', 'flask.try_trigger_before_first_request_functions', 'flask.preprocess_request', 'flask.dispatch_request', 'tests.contrib.flask.test_request.fivehundredone', 'flask.handle_user_exception', 'flask.handle_http_exception', 'flask.process_response', 'flask.do_teardown_request',", "self.assertEqual(req_span.get_tag(http.URL), u'http://localhost/üŋïĉóđē') assert_span_http_status_code(req_span, 200) # Handler span handler_span = spans[4]", "expected spans \"\"\" @self.app.route('/not-found') def not_found(): abort(404) res = self.client.get('/not-found')", "def index(): return 'Hello Flask', 200 with self.override_global_config(dict(analytics_enabled=True)): with self.override_config('flask',", "self.get_spans() self.assertEqual(len(spans), 10) # Assert the order of the spans", "'GET') self.assertEqual(req_span.get_tag(http.URL), 'http://localhost/not-found') assert_span_http_status_code(req_span, 404) self.assertEqual(req_span.get_tag('flask.endpoint'), 'not_found') self.assertEqual(req_span.get_tag('flask.url_rule'), '/not-found') #", "test_request_unicode(self): \"\"\" When making a request When the url contains", "# Request tags self.assertEqual(req_span.get_tag('flask.endpoint'), 'index') self.assertEqual(req_span.get_tag('flask.url_rule'), '/') self.assertEqual(req_span.get_tag('http.method'), 'GET') self.assertEqual(req_span.get_tag(http.URL),", "'Hello Flask', 200 with self.override_global_config(dict(analytics_enabled=False)): res = self.client.get('/') self.assertEqual(res.status_code, 200)", "import ANALYTICS_SAMPLE_RATE_KEY from ddtrace.contrib.flask.patch import 
flask_version from ddtrace.ext import http", "def index(): return 'Hello Flask', 200 with self.override_global_config(dict(analytics_enabled=True)): res =", "'flask.try_trigger_before_first_request_functions', 'flask.preprocess_request', 'flask.dispatch_request', 'flask.handle_user_exception', 'flask.handle_http_exception', 'flask.process_response', 'flask.do_teardown_request', 'flask.do_teardown_appcontext', ], [s.name", "def fivehundredone(): abort(501) res = self.client.get('/501') self.assertEqual(res.status_code, 501) spans =", "order of the spans created self.assertListEqual( [ 'flask.request', 'flask.try_trigger_before_first_request_functions', 'flask.preprocess_request',", "root: continue self.assertIsNone(span.get_metric(ANALYTICS_SAMPLE_RATE_KEY)) def test_analytics_global_off_integration_on(self): \"\"\" When making a request", "expected spans \"\"\" @self.app.errorhandler(500) def error_handler(e): return 'Whoops', 500 @self.app.route('/500')", "'web') self.assertEqual(req_span.error, 0) self.assertIsNone(req_span.parent_id) # Request tags self.assertEqual(req_span.get_tag('flask.endpoint'), 'index') self.assertEqual(req_span.get_tag('flask.url_rule'),", "We expect the root span to have the appropriate tag", "'flask.preprocess_request', 'flask.dispatch_request', 'tests.contrib.flask.test_request.fivehundredone', 'flask.handle_user_exception', 'flask.handle_http_exception', 'flask.process_response', 'flask.do_teardown_request', 'flask.do_teardown_appcontext', ], [s.name", "'flask.preprocess_request', 'flask.dispatch_request', 'tests.contrib.flask.test_request.fivehundred', 'flask.handle_user_exception', 'flask.handle_exception', 'tests.contrib.flask.test_request.error_handler', 'flask.process_response', 'flask.do_teardown_request', 'flask.do_teardown_appcontext', ],", "ANALYTICS_SAMPLE_RATE_KEY from ddtrace.contrib.flask.patch import flask_version from ddtrace.ext import http from", "is enabled and sample rate is set and globally trace", "self.assertEqual(req_span.get_tag(http.URL), 'http://localhost/') assert_span_http_status_code(req_span, 200) # Handler span handler_span = spans[4]", "an exception We create the expected spans \"\"\" @self.app.route('/500') def", "...utils import assert_span_http_status_code base_exception_name = 'builtins.Exception' if PY2: base_exception_name =", "calls `abort(501)` We create the expected spans \"\"\" @self.app.route('/501') def", "self.spans: if span == root: continue self.assertIsNone(span.get_metric(ANALYTICS_SAMPLE_RATE_KEY)) def test_analytics_global_on_integration_on(self): \"\"\"", "continue self.assertIsNone(span.get_metric(ANALYTICS_SAMPLE_RATE_KEY)) def test_analytics_global_off_integration_default(self): \"\"\" When making a request When", "= 'builtins.Exception' if PY2: base_exception_name = 'exceptions.Exception' class FlaskRequestTestCase(BaseFlaskTestCase): def", "'tests.contrib.flask.test_request.index') # Note: contains no query string self.assertEqual(handler_span.resource, '/') self.assertEqual(req_span.error,", "}) self.assertEqual(res.status_code, 200) self.assertEqual(res.data, b'Hello Flask') # Assert parent and", "# Note: contains no query string self.assertEqual(handler_span.resource, '/') self.assertEqual(req_span.error, 0)", "spans created self.assertListEqual( [ 'flask.request', 'flask.try_trigger_before_first_request_functions', 'flask.preprocess_request', 'flask.dispatch_request', 'tests.contrib.flask.test_request.fivehundred', 'flask.handle_user_exception',", "with self.override_config('flask', 
dict(distributed_tracing_enabled=True)): res = self.client.get('/', headers={ HTTP_HEADER_PARENT_ID: '12345', HTTP_HEADER_TRACE_ID:", "200 with self.override_http_config('flask', dict(trace_query_string=True)): self.client.get('/?foo=bar&baz=biz') spans = self.get_spans() # Request", "self.assertListEqual( [ 'flask.request', 'flask.try_trigger_before_first_request_functions', 'flask.preprocess_request', 'flask.dispatch_request', 'tests.contrib.flask.test_request.unicode', 'flask.process_response', 'flask.do_teardown_request', 'flask.do_teardown_appcontext',", "self.assertEqual(user_ex_span.name, 'flask.handle_user_exception') self.assertEqual(user_ex_span.resource, 'flask.handle_user_exception') self.assertEqual(user_ex_span.error, 1) self.assertTrue(user_ex_span.get_tag('error.msg').startswith('500 error')) self.assertTrue(user_ex_span.get_tag('error.stack').startswith('Traceback')) self.assertEqual(user_ex_span.get_tag('error.type'),", "self.assertEqual(req_span.get_tag('http.method'), 'GET') self.assertEqual(req_span.get_tag(http.URL), 'http://localhost/') assert_span_http_status_code(req_span, 200) assert http.QUERY_STRING not in", "self.assertEqual(req_span.error, 0) self.assertIsNone(req_span.parent_id) # Request tags self.assertEqual(req_span.get_tag('http.method'), 'GET') self.assertEqual(req_span.get_tag(http.URL), 'http://localhost/not-found')", "We expect the root span to not include tag \"\"\"", "== 'foo=bar&baz=biz' def test_analytics_global_on_integration_default(self): \"\"\" When making a request When", "Exception('500 error') res = self.client.get('/500') self.assertEqual(res.status_code, 500) spans = self.get_spans()", "self.assertEqual(req_span.name, 'flask.request') self.assertEqual(req_span.resource, 'GET /501') self.assertEqual(req_span.span_type, 'web') self.assertEqual(req_span.error, 1) self.assertIsNone(req_span.parent_id)", "PY2 from ddtrace.constants import ANALYTICS_SAMPLE_RATE_KEY from ddtrace.contrib.flask.patch import flask_version from", "Assert the order of the spans created self.assertListEqual( [ 'flask.request',", "search is disabled We expect the root span to have", "'Hello Flask', 200 with self.override_global_config(dict(analytics_enabled=False)): with self.override_config('flask', dict(analytics_enabled=True, analytics_sample_rate=0.5)): res", "request When distributed tracing headers are present We create the", "\"\"\" When making a request When the url contains unicode", "the appropriate tag \"\"\" @self.app.route('/') def index(): return 'Hello Flask',", "self.assertEqual(req_span.error, 0) self.assertIsNone(req_span.parent_id) # Request tags self.assertEqual(req_span.get_tag('flask.endpoint'), 'index') # Note:", "0) self.assertIsNone(req_span.parent_id) # Request tags self.assertEqual(req_span.get_tag('http.method'), 'GET') self.assertEqual(req_span.get_tag(http.URL), 'http://localhost/not-found') assert_span_http_status_code(req_span,", "Not Found')) self.assertTrue(dispatch_span.get_tag('error.stack').startswith('Traceback')) self.assertEqual(dispatch_span.get_tag('error.type'), 'werkzeug.exceptions.NotFound') def test_request_abort_404(self): \"\"\" When making", "making a request When the requested endpoint calls `abort(404)` We", "created self.assertListEqual( [ 'flask.request', 'flask.try_trigger_before_first_request_functions', 'flask.preprocess_request', 'flask.dispatch_request', 'tests.contrib.flask.test_request.not_found', 'flask.handle_user_exception', 'flask.handle_http_exception',", "'Whoops', 500 @self.app.route('/500') def 
fivehundred(): raise Exception('500 error') res =", "# User exception span user_ex_span = spans[5] self.assertEqual(user_ex_span.service, 'flask') self.assertEqual(user_ex_span.name,", "return 'Hello Flask', 200 with self.override_global_config(dict(analytics_enabled=False)): with self.override_config('flask', dict(analytics_enabled=True, analytics_sample_rate=0.5)):", "an exception We create the expected spans \"\"\" @self.app.errorhandler(500) def", "\"\"\" @self.app.route('/') def index(): return 'Hello Flask', 200 res =", "trace search is not event sample rate is not set", "expect the root span to have the appropriate tag \"\"\"", "have the appropriate tag \"\"\" @self.app.route('/') def index(): return 'Hello", "disabled We expect the root span to not include tag", "a request We create the expected spans \"\"\" @self.app.route('/') def", "was not found We create the expected spans \"\"\" res", "self.assertEqual(res.status_code, 200) self.assertEqual(res.data, b'Hello Flask') spans = self.get_spans() self.assertEqual(len(spans), 8)", "create the expected spans \"\"\" @self.app.route('/500') def fivehundred(): raise Exception('500", "create the expected spans \"\"\" res = self.client.get('/not-found') self.assertEqual(res.status_code, 404)", "def fivehundred(): raise Exception('500 error') res = self.client.get('/500') self.assertEqual(res.status_code, 500)", "self.assertEqual(req_span.get_tag('http.method'), 'GET') self.assertEqual(req_span.get_tag(http.URL), 'http://localhost/500') assert_span_http_status_code(req_span, 500) self.assertEqual(req_span.get_tag('flask.endpoint'), 'fivehundred') self.assertEqual(req_span.get_tag('flask.url_rule'), '/500')", "self.assertEqual(dispatch_span.name, 'flask.dispatch_request') self.assertEqual(dispatch_span.resource, 'flask.dispatch_request') self.assertEqual(dispatch_span.error, 1) self.assertTrue(dispatch_span.get_tag('error.msg').startswith('501 Not Implemented')) self.assertTrue(dispatch_span.get_tag('error.stack').startswith('Traceback'))", "'flask.request') self.assertEqual(req_span.resource, 'GET /not-found') self.assertEqual(req_span.span_type, 'web') self.assertEqual(req_span.error, 0) self.assertIsNone(req_span.parent_id) #", "res = self.client.get('/') self.assertEqual(res.status_code, 200) self.assertEqual(res.data, b'Hello Flask') root =", "root = self.get_root_span() root.assert_matches( name='flask.request', metrics={ ANALYTICS_SAMPLE_RATE_KEY: 0.5, }, )", "200) self.assertEqual(res.data, b'Hello Flask') # Assert parent and trace id", "expect the root span to not include tag \"\"\" @self.app.route('/')" ]
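The span order asserted throughout these tests follows Flask's own request lifecycle hooks. The sketch below is illustrative only, not the ddtrace instrumentation: the `events` list and the hook functions are invented names, and it assumes Flask < 2.3 (where `before_first_request` still exists). It simply shows that the hooks fire in the same order as the 'flask.*' span names above.

# A minimal sketch (NOT the ddtrace implementation) of the lifecycle that the
# asserted span order mirrors. `events` and the hook functions are
# illustrative names; assumes Flask < 2.3 for `before_first_request`.
from flask import Flask

app = Flask(__name__)
events = []

@app.before_first_request  # ~ 'flask.try_trigger_before_first_request_functions'
def _first():
    events.append('before_first_request')

@app.before_request  # ~ 'flask.preprocess_request'
def _before():
    events.append('preprocess_request')

@app.route('/')  # the handler span, e.g. 'tests.contrib.flask.test_request.index'
def index():
    events.append('dispatch_request')
    return 'Hello Flask', 200

@app.after_request  # ~ 'flask.process_response'
def _after(response):
    events.append('process_response')
    return response

@app.teardown_request  # ~ 'flask.do_teardown_request'
def _teardown(exc):
    events.append('do_teardown_request')

client = app.test_client()
client.get('/')
print(events)
# ['before_first_request', 'preprocess_request', 'dispatch_request',
#  'process_response', 'do_teardown_request']

On a 404 or an unhandled exception, Flask additionally routes through `handle_user_exception` and then `handle_http_exception` or `handle_exception`, which is why the error-path tests assert those extra spans between the handler and the teardown spans.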
[ "passage ids into a dictionary \"\"\" sim_dict = {} lines", "text)) print(\"Removed \" + str(removed) + \" passages\") print(\"Dumping id", "as f: car_id_to_idx = pickle.load(f) else: sim_dict = parse_sim_file(sim_file) car_base_id", "line in lines: data = line.strip().split(':') if len(data[1]) > 0:", "topic_id = topic_query.split('_')[0] query_id = topic_query.split('_')[1] if topic_id not in", "len(topic_number_dict) // NUM_FOLD for i in range(NUM_FOLD): with open(out_topics_file +", "= 0 with open(args.msmarco_collection, \"r\") as m: for line in", "if len(data[1]) > 0: sim_docs = data[-1].split(',') for docs in", "all_annonated[topic_number][query_number] out_raw_queries.write(\"{}_{}\\t{}\\n\".format(topic_number, query_number, raw_utterance)) if not topic_number in topic_number_dict: topic_number_dict[topic_number]", "raw_utterance = str( query['number']), query['raw_utterance'] queries.append(raw_utterance) record = {} record['topic_number']", "parser.parse_args() # INPUT sim_file = args.duplicate_file cast_topics_raw_file = os.path.join(args.cast_dir, \"evaluation_topics_v1.0.json\")", "os.path.join(args.cast_dir, \"evaluation_topics_v1.0.json\") cast_topics_manual_file = os.path.join( args.cast_dir, \"evaluation_topics_annotated_resolved_v1.0.tsv\") cast_qrels_file = os.path.join(args.cast_dir,", "\"2019qrels.txt\") # OUTPUT out_topics_file = os.path.join(args.out_data_dir, \"eval_topics.jsonl\") out_raw_queries_file = os.path.join(args.out_data_dir,", "CAR_76a4a716d4b1b01995c6663ee16e94b4ca35fdd3 -> 10000044 car_idx_to_id.append(car_id) f.write(\"{}\\t{}\\n\".format(idx, text)) i += 1 print(\"Processing", "\" passages\") print(\"Dumping id mappings to {} and {}...\".format(car_id_to_idx_file, car_idx_to_id_file))", "out_raw_queries_file = os.path.join(args.out_data_dir, \"queries.raw.tsv\") out_manual_queries_file = os.path.join(args.out_data_dir, \"queries.manual.tsv\") out_qrels_file =", "out_raw_queries.write(\"{}_{}\\t{}\\n\".format(topic_number, query_number, raw_utterance)) if not topic_number in topic_number_dict: topic_number_dict[topic_number] =", "car_id_to_idx...\") with open(car_id_to_idx_file, \"rb\") as f: car_id_to_idx = pickle.load(f) else:", "= 10000000 i = 0 with open(out_collection_file, \"w\", encoding=\"utf-8\") as", "qid, _, pid, rel = line.strip().split() if pid.startswith(\"CAR_\"): assert car_id_to_idx[pid]", "\"r\") as fin: annonated_lines = fin.readlines() out_raw_queries = open(out_raw_queries_file, \"w\")", "out_manual_queries.write(line) topic_query = splitted[0] query = splitted[1].strip() topic_id = topic_query.split('_')[0]", "in tqdm( read_data.iter_paragraphs(open(args.car_cbor, 'rb'))): car_id = \"CAR_\" + para.para_id text", "e.g. 
CAR_76a4a716d4b1b01995c6663ee16e94b4ca35fdd3 -> 10000044 car_idx_to_id.append(car_id) f.write(\"{}\\t{}\\n\".format(idx, text)) i += 1", "deduplicated documents file and stores the duplicate passage ids into", "open(args.msmarco_collection, \"r\") as m: for line in tqdm(m): marco_id, text", "<filename>ConvDR/data/preprocess_cast19.py import argparse from trec_car import read_data from tqdm import", "passages, assign new ids car_id_to_idx = {} car_idx_to_id = []", "json.dumps(item) fout.write(json_str + '\\n') # Split eval data into K-fold", "continue f.write(\"{}\\t{}\\n\".format(marco_id, text)) print(\"Removed \" + str(removed) + \" passages\")", "trec_car import read_data from tqdm import tqdm import pickle import", "\"car_id_to_idx.pickle\") car_idx_to_id_file = os.path.join(args.out_collection_dir, \"car_idx_to_id.pickle\") out_collection_file = os.path.join(args.out_collection_dir, \"collection.tsv\") #", "group['number']), group.get('description', ''), group['turn'], group.get( 'title', '') queries = []", "\"\"\" Reads the deduplicated documents file and stores the duplicate", "open(car_id_to_idx_file, \"rb\") as f: car_id_to_idx = pickle.load(f) else: sim_dict =", "os.path.join(args.cast_dir, \"2019qrels.txt\") # OUTPUT out_topics_file = os.path.join(args.out_data_dir, \"eval_topics.jsonl\") out_raw_queries_file =", "import read_data from tqdm import tqdm import pickle import os", "topic_id not in all_annonated: all_annonated[topic_id] = {} all_annonated[topic_id][query_id] = query", "\"r\") as oq, open(out_qrels_file, \"w\") as nq: for line in", "= parse_sim_file(sim_file) car_base_id = 10000000 i = 0 with open(out_collection_file,", "def parse_sim_file(filename): \"\"\" Reads the deduplicated documents file and stores", "os.path.exists(car_idx_to_id_file): print(\"Preprocessed collection found. Loading car_id_to_idx...\") with open(car_id_to_idx_file, \"rb\") as", "annonated_lines: splitted = line.split('\\t') out_manual_queries.write(line) topic_query = splitted[0] query =", "as nq: for line in oq: qid, _, pid, rel", "stores the duplicate passage ids into a dictionary \"\"\" sim_dict", "car_idx_to_id.append(car_id) f.write(\"{}\\t{}\\n\".format(idx, text)) i += 1 print(\"Processing MS MARCO...\") removed", "json.load(fin) with open(cast_topics_manual_file, \"r\") as fin: annonated_lines = fin.readlines() out_raw_queries", "int(pid[6:]) else: continue nq.write(qid + \"\\t0\\t\" + str(pid) + \"\\t\"", "os.path.join( args.cast_dir, \"evaluation_topics_annotated_resolved_v1.0.tsv\") cast_qrels_file = os.path.join(args.cast_dir, \"2019qrels.txt\") # OUTPUT out_topics_file", "topic_number, description, turn, title = str( group['number']), group.get('description', ''), group['turn'],", "splitted[0] query = splitted[1].strip() topic_id = topic_query.split('_')[0] query_id = topic_query.split('_')[1]", "= os.path.join(args.out_collection_dir, \"collection.tsv\") # 1. 
Combine TREC-CAR & MS MARCO,", "dictionary \"\"\" sim_dict = {} lines = open(filename).readlines() for line", "= {} for line in annonated_lines: splitted = line.split('\\t') out_manual_queries.write(line)", "[] for group in raw_data: topic_number, description, turn, title =", "not topic_number in topic_number_dict: topic_number_dict[topic_number] = len(topic_number_dict) data.append(record) out_raw_queries.close() with", "f: pickle.dump(car_id_to_idx, f) with open(car_idx_to_id_file, \"wb\") as f: pickle.dump(car_idx_to_id, f)", "car_base_id = 10000000 i = 0 with open(out_collection_file, \"w\", encoding=\"utf-8\")", "data = line.strip().split(':') if len(data[1]) > 0: sim_docs = data[-1].split(',')", "CAsT utterances...\") with open(cast_topics_raw_file, \"r\") as fin: raw_data = json.load(fin)", "topic_per_fold == i: json_str = json.dumps(item) fout.write(json_str + '\\n') #", "K-fold topic_per_fold = len(topic_number_dict) // NUM_FOLD for i in range(NUM_FOLD):", "topic_number in topic_number_dict: topic_number_dict[topic_number] = len(topic_number_dict) data.append(record) out_raw_queries.close() with open(out_topics_file,", "qrels print(\"Processing qrels...\") with open(cast_qrels_file, \"r\") as oq, open(out_qrels_file, \"w\")", "\"w\") out_manual_queries = open(out_manual_queries_file, \"w\") all_annonated = {} for line", "topic_query.split('_')[1] if topic_id not in all_annonated: all_annonated[topic_id] = {} all_annonated[topic_id][query_id]", "eval data into K-fold topic_per_fold = len(topic_number_dict) // NUM_FOLD for", "i: json_str = json.dumps(item) fout.write(json_str + '\\n') # 3. Process", "line.strip().split() if pid.startswith(\"CAR_\"): assert car_id_to_idx[pid] != -1 pid = car_id_to_idx[pid]", "parser = argparse.ArgumentParser() parser.add_argument(\"--car_cbor\", type=str) parser.add_argument(\"--msmarco_collection\", type=str) parser.add_argument(\"--duplicate_file\", type=str) parser.add_argument(\"--cast_dir\",", "all_annonated[topic_id][query_id] = query out_manual_queries.close() topic_number_dict = {} data = []", "1 continue f.write(\"{}\\t{}\\n\".format(marco_id, text)) print(\"Removed \" + str(removed) + \"", "json import copy from utils.util import NUM_FOLD def parse_sim_file(filename): \"\"\"", "lines: data = line.strip().split(':') if len(data[1]) > 0: sim_docs =", "convert qrels print(\"Processing qrels...\") with open(cast_qrels_file, \"r\") as oq, open(out_qrels_file,", "= str( query['number']), query['raw_utterance'] queries.append(raw_utterance) record = {} record['topic_number'] =", "query out_manual_queries.close() topic_number_dict = {} data = [] for group", "Reads the deduplicated documents file and stores the duplicate passage", "\").replace(\"\\r\", \" \") idx = car_base_id + i car_id_to_idx[ car_id]", "+= 1 print(\"Processing MS MARCO...\") removed = 0 with open(args.msmarco_collection,", "import NUM_FOLD def parse_sim_file(filename): \"\"\" Reads the deduplicated documents file", "\" \").replace(\"\\n\", \" \").replace(\"\\r\", \" \") idx = car_base_id +", "open(cast_topics_raw_file, \"r\") as fin: raw_data = json.load(fin) with open(cast_topics_manual_file, \"r\")", "assert car_id_to_idx[pid] != -1 pid = car_id_to_idx[pid] elif pid.startswith(\"MARCO_\"): pid", "\"evaluation_topics_annotated_resolved_v1.0.tsv\") cast_qrels_file = os.path.join(args.cast_dir, \"2019qrels.txt\") # OUTPUT out_topics_file = os.path.join(args.out_data_dir,", "all_annonated[topic_id] = {} all_annonated[topic_id][query_id] = query out_manual_queries.close() 
topic_number_dict = {}", "= {} lines = open(filename).readlines() for line in lines: data", "a dictionary \"\"\" sim_dict = {} lines = open(filename).readlines() for", "\"CAR_\" + para.para_id text = para.get_text() text = text.replace(\"\\t\", \"", "line in annonated_lines: splitted = line.split('\\t') out_manual_queries.write(line) topic_query = splitted[0]", "not in all_annonated: all_annonated[topic_id] = {} all_annonated[topic_id][query_id] = query out_manual_queries.close()", "= \"CAR_\" + para.para_id text = para.get_text() text = text.replace(\"\\t\",", "'\\n') # Split eval data into K-fold topic_per_fold = len(topic_number_dict)", "[] for query in turn: query_number, raw_utterance = str( query['number']),", "{} and {}...\".format(car_id_to_idx_file, car_idx_to_id_file)) with open(car_id_to_idx_file, \"wb\") as f: pickle.dump(car_id_to_idx,", "text = para.get_text() text = text.replace(\"\\t\", \" \").replace(\"\\n\", \" \").replace(\"\\r\",", "fout.write(json_str + '\\n') # 3. Process and convert qrels print(\"Processing", "\"__main__\": parser = argparse.ArgumentParser() parser.add_argument(\"--car_cbor\", type=str) parser.add_argument(\"--msmarco_collection\", type=str) parser.add_argument(\"--duplicate_file\", type=str)", "json.dumps(item) fout.write(json_str + '\\n') # 3. Process and convert qrels", "= {} all_annonated[topic_id][query_id] = query out_manual_queries.close() topic_number_dict = {} data", "= para.get_text() text = text.replace(\"\\t\", \" \").replace(\"\\n\", \" \").replace(\"\\r\", \"", "group.get( 'title', '') queries = [] for query in turn:", "query['number']), query['raw_utterance'] queries.append(raw_utterance) record = {} record['topic_number'] = topic_number record['query_number']", "for line in oq: qid, _, pid, rel = line.strip().split()", "= os.path.join(args.out_data_dir, \"queries.manual.tsv\") out_qrels_file = os.path.join(args.out_data_dir, \"qrels.tsv\") car_id_to_idx_file = os.path.join(args.out_collection_dir,", "run print(\"Processing TREC-CAR...\") for para in tqdm( read_data.iter_paragraphs(open(args.car_cbor, 'rb'))): car_id", "to 'w' in normal run print(\"Processing TREC-CAR...\") for para in", "= topic_number_dict[item['topic_number']] if idx // topic_per_fold == i: json_str =", "oq: qid, _, pid, rel = line.strip().split() if pid.startswith(\"CAR_\"): assert", "= args.duplicate_file cast_topics_raw_file = os.path.join(args.cast_dir, \"evaluation_topics_v1.0.json\") cast_topics_manual_file = os.path.join( args.cast_dir,", "duplicate passages, assign new ids car_id_to_idx = {} car_idx_to_id =", "if os.path.exists(out_collection_file) and os.path.exists( car_id_to_idx_file) and os.path.exists(car_idx_to_id_file): print(\"Preprocessed collection found.", "description record['title'] = title record['input'] = copy.deepcopy(queries) record['target'] = all_annonated[topic_number][query_number]", "title = str( group['number']), group.get('description', ''), group['turn'], group.get( 'title', '')", "fin: annonated_lines = fin.readlines() out_raw_queries = open(out_raw_queries_file, \"w\") out_manual_queries =", "all_annonated: all_annonated[topic_id] = {} all_annonated[topic_id][query_id] = query out_manual_queries.close() topic_number_dict =", "as fout: for item in data: json_str = json.dumps(item) fout.write(json_str", "query_number, raw_utterance = str( query['number']), query['raw_utterance'] queries.append(raw_utterance) record = {}", "= topic_number record['query_number'] = query_number record['description'] = description record['title'] =", "0 
with open(out_collection_file, \"w\", encoding=\"utf-8\") as f: #FIX change 'a'", "data[-1].split(',') for docs in sim_docs: sim_dict[docs] = 1 return sim_dict", "(\"MARCO_\" + marco_id) in sim_dict: removed += 1 continue f.write(\"{}\\t{}\\n\".format(marco_id,", "car_id_to_idx_file = os.path.join(args.out_collection_dir, \"car_id_to_idx.pickle\") car_idx_to_id_file = os.path.join(args.out_collection_dir, \"car_idx_to_id.pickle\") out_collection_file =", "len(topic_number_dict) data.append(record) out_raw_queries.close() with open(out_topics_file, 'w') as fout: for item", "str(i), 'w') as fout: for item in data: idx =", "and stores the duplicate passage ids into a dictionary \"\"\"", "car_id = \"CAR_\" + para.para_id text = para.get_text() text =", "[] if os.path.exists(out_collection_file) and os.path.exists( car_id_to_idx_file) and os.path.exists(car_idx_to_id_file): print(\"Preprocessed collection", "new ids car_id_to_idx = {} car_idx_to_id = [] if os.path.exists(out_collection_file)", "i += 1 print(\"Processing MS MARCO...\") removed = 0 with", "MS MARCO...\") removed = 0 with open(args.msmarco_collection, \"r\") as m:", "query_id = topic_query.split('_')[1] if topic_id not in all_annonated: all_annonated[topic_id] =", "\"eval_topics.jsonl\") out_raw_queries_file = os.path.join(args.out_data_dir, \"queries.raw.tsv\") out_manual_queries_file = os.path.join(args.out_data_dir, \"queries.manual.tsv\") out_qrels_file", "Combine TREC-CAR & MS MARCO, remove duplicate passages, assign new", "parser.add_argument(\"--out_data_dir\", type=str) parser.add_argument(\"--out_collection_dir\", type=str) args = parser.parse_args() # INPUT sim_file", "query['raw_utterance'] queries.append(raw_utterance) record = {} record['topic_number'] = topic_number record['query_number'] =", "2. Process queries print(\"Processing CAsT utterances...\") with open(cast_topics_raw_file, \"r\") as", "os.path.join(args.out_data_dir, \"eval_topics.jsonl\") out_raw_queries_file = os.path.join(args.out_data_dir, \"queries.raw.tsv\") out_manual_queries_file = os.path.join(args.out_data_dir, \"queries.manual.tsv\")", "'rb'))): car_id = \"CAR_\" + para.para_id text = para.get_text() text", "as f: pickle.dump(car_id_to_idx, f) with open(car_idx_to_id_file, \"wb\") as f: pickle.dump(car_idx_to_id,", "collection found. Loading car_id_to_idx...\") with open(car_id_to_idx_file, \"rb\") as f: car_id_to_idx", "open(out_qrels_file, \"w\") as nq: for line in oq: qid, _,", "for docs in sim_docs: sim_dict[docs] = 1 return sim_dict if", "out_collection_file = os.path.join(args.out_collection_dir, \"collection.tsv\") # 1. 
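A small sketch of the duplicate-file format parse_sim_file() expects, with invented ids: each line carries a passage id, a colon, then a comma-separated list of that passage's duplicates; a line with nothing after the colon contributes no entries.

import tempfile

# Invented example lines: MARCO_100 has two listed duplicates, MARCO_400 has none.
demo = "MARCO_100:MARCO_200,MARCO_300\nMARCO_400:\n"
with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as tmp:
    tmp.write(demo)

print(parse_sim_file(tmp.name))
# -> {'MARCO_200': 1, 'MARCO_300': 1}; only the duplicates are marked for
#    removal, so the canonical copy of each passage stays in the collection.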
Combine TREC-CAR & MS", "normal run print(\"Processing TREC-CAR...\") for para in tqdm( read_data.iter_paragraphs(open(args.car_cbor, 'rb'))):", "= os.path.join(args.out_data_dir, \"qrels.tsv\") car_id_to_idx_file = os.path.join(args.out_collection_dir, \"car_id_to_idx.pickle\") car_idx_to_id_file = os.path.join(args.out_collection_dir,", "f: #FIX change 'a' to 'w' in normal run print(\"Processing", "in sim_docs: sim_dict[docs] = 1 return sim_dict if __name__ ==", "+ \".\" + str(i), 'w') as fout: for item in", "type=str) parser.add_argument(\"--duplicate_file\", type=str) parser.add_argument(\"--cast_dir\", type=str) parser.add_argument(\"--out_data_dir\", type=str) parser.add_argument(\"--out_collection_dir\", type=str) args", "+ marco_id) in sim_dict: removed += 1 continue f.write(\"{}\\t{}\\n\".format(marco_id, text))", "'w') as fout: for item in data: json_str = json.dumps(item)", "out_manual_queries_file = os.path.join(args.out_data_dir, \"queries.manual.tsv\") out_qrels_file = os.path.join(args.out_data_dir, \"qrels.tsv\") car_id_to_idx_file =", "= len(topic_number_dict) // NUM_FOLD for i in range(NUM_FOLD): with open(out_topics_file", "{} data = [] for group in raw_data: topic_number, description,", "record['topic_number'] = topic_number record['query_number'] = query_number record['description'] = description record['title']", "= {} data = [] for group in raw_data: topic_number,", "sim_dict if __name__ == \"__main__\": parser = argparse.ArgumentParser() parser.add_argument(\"--car_cbor\", type=str)", "fin.readlines() out_raw_queries = open(out_raw_queries_file, \"w\") out_manual_queries = open(out_manual_queries_file, \"w\") all_annonated", "= os.path.join(args.cast_dir, \"2019qrels.txt\") # OUTPUT out_topics_file = os.path.join(args.out_data_dir, \"eval_topics.jsonl\") out_raw_queries_file", "turn, title = str( group['number']), group.get('description', ''), group['turn'], group.get( 'title',", "3. Process and convert qrels print(\"Processing qrels...\") with open(cast_qrels_file, \"r\")", "import copy from utils.util import NUM_FOLD def parse_sim_file(filename): \"\"\" Reads", "TREC-CAR...\") for para in tqdm( read_data.iter_paragraphs(open(args.car_cbor, 'rb'))): car_id = \"CAR_\"", "= [] for query in turn: query_number, raw_utterance = str(", "pid = car_id_to_idx[pid] elif pid.startswith(\"MARCO_\"): pid = int(pid[6:]) else: continue", "= car_id_to_idx[pid] elif pid.startswith(\"MARCO_\"): pid = int(pid[6:]) else: continue nq.write(qid", "f: pickle.dump(car_idx_to_id, f) # 2. Process queries print(\"Processing CAsT utterances...\")", "print(\"Removed \" + str(removed) + \" passages\") print(\"Dumping id mappings", "open(out_raw_queries_file, \"w\") out_manual_queries = open(out_manual_queries_file, \"w\") all_annonated = {} for", "# 1. Combine TREC-CAR & MS MARCO, remove duplicate passages,", "with open(cast_topics_manual_file, \"r\") as fin: annonated_lines = fin.readlines() out_raw_queries =", "from utils.util import NUM_FOLD def parse_sim_file(filename): \"\"\" Reads the deduplicated", "sim_dict: removed += 1 continue f.write(\"{}\\t{}\\n\".format(marco_id, text)) print(\"Removed \" +", "car_id_to_idx_file) and os.path.exists(car_idx_to_id_file): print(\"Preprocessed collection found. 
Loading car_id_to_idx...\") with open(car_id_to_idx_file,", "= 1 return sim_dict if __name__ == \"__main__\": parser =", "copy.deepcopy(queries) record['target'] = all_annonated[topic_number][query_number] out_raw_queries.write(\"{}_{}\\t{}\\n\".format(topic_number, query_number, raw_utterance)) if not topic_number", "text.replace(\"\\t\", \" \").replace(\"\\n\", \" \").replace(\"\\r\", \" \") idx = car_base_id", "line.strip().split(':') if len(data[1]) > 0: sim_docs = data[-1].split(',') for docs", "\"r\") as fin: raw_data = json.load(fin) with open(cast_topics_manual_file, \"r\") as", "print(\"Processing TREC-CAR...\") for para in tqdm( read_data.iter_paragraphs(open(args.car_cbor, 'rb'))): car_id =", "topic_query = splitted[0] query = splitted[1].strip() topic_id = topic_query.split('_')[0] query_id", "type=str) parser.add_argument(\"--msmarco_collection\", type=str) parser.add_argument(\"--duplicate_file\", type=str) parser.add_argument(\"--cast_dir\", type=str) parser.add_argument(\"--out_data_dir\", type=str) parser.add_argument(\"--out_collection_dir\",", "= os.path.join(args.out_collection_dir, \"car_id_to_idx.pickle\") car_idx_to_id_file = os.path.join(args.out_collection_dir, \"car_idx_to_id.pickle\") out_collection_file = os.path.join(args.out_collection_dir,", "= [] if os.path.exists(out_collection_file) and os.path.exists( car_id_to_idx_file) and os.path.exists(car_idx_to_id_file): print(\"Preprocessed", "line.strip().split(\"\\t\") if (\"MARCO_\" + marco_id) in sim_dict: removed += 1", "= json.dumps(item) fout.write(json_str + '\\n') # Split eval data into", "= copy.deepcopy(queries) record['target'] = all_annonated[topic_number][query_number] out_raw_queries.write(\"{}_{}\\t{}\\n\".format(topic_number, query_number, raw_utterance)) if not", "tqdm( read_data.iter_paragraphs(open(args.car_cbor, 'rb'))): car_id = \"CAR_\" + para.para_id text =", "json_str = json.dumps(item) fout.write(json_str + '\\n') # 3. 
Process and", "MS MARCO, remove duplicate passages, assign new ids car_id_to_idx =", "1 return sim_dict if __name__ == \"__main__\": parser = argparse.ArgumentParser()", "text = line.strip().split(\"\\t\") if (\"MARCO_\" + marco_id) in sim_dict: removed", "open(cast_topics_manual_file, \"r\") as fin: annonated_lines = fin.readlines() out_raw_queries = open(out_raw_queries_file,", "topic_number_dict[item['topic_number']] if idx // topic_per_fold == i: json_str = json.dumps(item)", "in all_annonated: all_annonated[topic_id] = {} all_annonated[topic_id][query_id] = query out_manual_queries.close() topic_number_dict", "topic_number_dict: topic_number_dict[topic_number] = len(topic_number_dict) data.append(record) out_raw_queries.close() with open(out_topics_file, 'w') as", "else: continue nq.write(qid + \"\\t0\\t\" + str(pid) + \"\\t\" +", "type=str) parser.add_argument(\"--out_data_dir\", type=str) parser.add_argument(\"--out_collection_dir\", type=str) args = parser.parse_args() # INPUT", "= parser.parse_args() # INPUT sim_file = args.duplicate_file cast_topics_raw_file = os.path.join(args.cast_dir,", "= [] for group in raw_data: topic_number, description, turn, title", "// topic_per_fold == i: json_str = json.dumps(item) fout.write(json_str + '\\n')", "MARCO, remove duplicate passages, assign new ids car_id_to_idx = {}", "assign new ids car_id_to_idx = {} car_idx_to_id = [] if", "with open(car_id_to_idx_file, \"rb\") as f: car_id_to_idx = pickle.load(f) else: sim_dict", "topic_number_dict[topic_number] = len(topic_number_dict) data.append(record) out_raw_queries.close() with open(out_topics_file, 'w') as fout:", "import json import copy from utils.util import NUM_FOLD def parse_sim_file(filename):", "f: car_id_to_idx = pickle.load(f) else: sim_dict = parse_sim_file(sim_file) car_base_id =", "\"w\", encoding=\"utf-8\") as f: #FIX change 'a' to 'w' in", "json_str = json.dumps(item) fout.write(json_str + '\\n') # Split eval data", "nq.write(qid + \"\\t0\\t\" + str(pid) + \"\\t\" + rel +", "with open(cast_topics_raw_file, \"r\") as fin: raw_data = json.load(fin) with open(cast_topics_manual_file,", "__name__ == \"__main__\": parser = argparse.ArgumentParser() parser.add_argument(\"--car_cbor\", type=str) parser.add_argument(\"--msmarco_collection\", type=str)", "\"qrels.tsv\") car_id_to_idx_file = os.path.join(args.out_collection_dir, \"car_id_to_idx.pickle\") car_idx_to_id_file = os.path.join(args.out_collection_dir, \"car_idx_to_id.pickle\") out_collection_file", "= pickle.load(f) else: sim_dict = parse_sim_file(sim_file) car_base_id = 10000000 i", "line in oq: qid, _, pid, rel = line.strip().split() if", "open(cast_qrels_file, \"r\") as oq, open(out_qrels_file, \"w\") as nq: for line", "in range(NUM_FOLD): with open(out_topics_file + \".\" + str(i), 'w') as", "in lines: data = line.strip().split(':') if len(data[1]) > 0: sim_docs", "\" \").replace(\"\\r\", \" \") idx = car_base_id + i car_id_to_idx[", "marco_id) in sim_dict: removed += 1 continue f.write(\"{}\\t{}\\n\".format(marco_id, text)) print(\"Removed", "as fin: annonated_lines = fin.readlines() out_raw_queries = open(out_raw_queries_file, \"w\") out_manual_queries", "os.path.join(args.out_collection_dir, \"collection.tsv\") # 1. 
Combine TREC-CAR & MS MARCO, remove", "to {} and {}...\".format(car_id_to_idx_file, car_idx_to_id_file)) with open(car_id_to_idx_file, \"wb\") as f:", "= description record['title'] = title record['input'] = copy.deepcopy(queries) record['target'] =", "range(NUM_FOLD): with open(out_topics_file + \".\" + str(i), 'w') as fout:", "argparse.ArgumentParser() parser.add_argument(\"--car_cbor\", type=str) parser.add_argument(\"--msmarco_collection\", type=str) parser.add_argument(\"--duplicate_file\", type=str) parser.add_argument(\"--cast_dir\", type=str) parser.add_argument(\"--out_data_dir\",", "= idx # e.g. CAR_76a4a716d4b1b01995c6663ee16e94b4ca35fdd3 -> 10000044 car_idx_to_id.append(car_id) f.write(\"{}\\t{}\\n\".format(idx, text))", "queries = [] for query in turn: query_number, raw_utterance =", "turn: query_number, raw_utterance = str( query['number']), query['raw_utterance'] queries.append(raw_utterance) record =", "= text.replace(\"\\t\", \" \").replace(\"\\n\", \" \").replace(\"\\r\", \" \") idx =", "print(\"Dumping id mappings to {} and {}...\".format(car_id_to_idx_file, car_idx_to_id_file)) with open(car_id_to_idx_file,", "cast_qrels_file = os.path.join(args.cast_dir, \"2019qrels.txt\") # OUTPUT out_topics_file = os.path.join(args.out_data_dir, \"eval_topics.jsonl\")", "tqdm(m): marco_id, text = line.strip().split(\"\\t\") if (\"MARCO_\" + marco_id) in", "= argparse.ArgumentParser() parser.add_argument(\"--car_cbor\", type=str) parser.add_argument(\"--msmarco_collection\", type=str) parser.add_argument(\"--duplicate_file\", type=str) parser.add_argument(\"--cast_dir\", type=str)", "record['description'] = description record['title'] = title record['input'] = copy.deepcopy(queries) record['target']", "pid = int(pid[6:]) else: continue nq.write(qid + \"\\t0\\t\" + str(pid)", "'a' to 'w' in normal run print(\"Processing TREC-CAR...\") for para", "\"wb\") as f: pickle.dump(car_id_to_idx, f) with open(car_idx_to_id_file, \"wb\") as f:", "open(out_topics_file, 'w') as fout: for item in data: json_str =", "query_number record['description'] = description record['title'] = title record['input'] = copy.deepcopy(queries)", "NUM_FOLD for i in range(NUM_FOLD): with open(out_topics_file + \".\" +", "oq, open(out_qrels_file, \"w\") as nq: for line in oq: qid,", "0 with open(args.msmarco_collection, \"r\") as m: for line in tqdm(m):", "read_data.iter_paragraphs(open(args.car_cbor, 'rb'))): car_id = \"CAR_\" + para.para_id text = para.get_text()", "\" + str(removed) + \" passages\") print(\"Dumping id mappings to", "record['title'] = title record['input'] = copy.deepcopy(queries) record['target'] = all_annonated[topic_number][query_number] out_raw_queries.write(\"{}_{}\\t{}\\n\".format(topic_number,", "Loading car_id_to_idx...\") with open(car_id_to_idx_file, \"rb\") as f: car_id_to_idx = pickle.load(f)", "+ '\\n') # 3. 
Process and convert qrels print(\"Processing qrels...\")", "all_annonated = {} for line in annonated_lines: splitted = line.split('\\t')", "in topic_number_dict: topic_number_dict[topic_number] = len(topic_number_dict) data.append(record) out_raw_queries.close() with open(out_topics_file, 'w')", "encoding=\"utf-8\") as f: #FIX change 'a' to 'w' in normal", "f) with open(car_idx_to_id_file, \"wb\") as f: pickle.dump(car_idx_to_id, f) # 2.", "with open(out_topics_file + \".\" + str(i), 'w') as fout: for", "title record['input'] = copy.deepcopy(queries) record['target'] = all_annonated[topic_number][query_number] out_raw_queries.write(\"{}_{}\\t{}\\n\".format(topic_number, query_number, raw_utterance))", "car_base_id + i car_id_to_idx[ car_id] = idx # e.g. CAR_76a4a716d4b1b01995c6663ee16e94b4ca35fdd3", "import pickle import os import json import copy from utils.util", "pid.startswith(\"CAR_\"): assert car_id_to_idx[pid] != -1 pid = car_id_to_idx[pid] elif pid.startswith(\"MARCO_\"):", "data.append(record) out_raw_queries.close() with open(out_topics_file, 'w') as fout: for item in", "lines = open(filename).readlines() for line in lines: data = line.strip().split(':')", "as f: pickle.dump(car_idx_to_id, f) # 2. Process queries print(\"Processing CAsT", "print(\"Processing qrels...\") with open(cast_qrels_file, \"r\") as oq, open(out_qrels_file, \"w\") as", "tqdm import pickle import os import json import copy from", "documents file and stores the duplicate passage ids into a", "with open(args.msmarco_collection, \"r\") as m: for line in tqdm(m): marco_id,", "topic_per_fold = len(topic_number_dict) // NUM_FOLD for i in range(NUM_FOLD): with", "for para in tqdm( read_data.iter_paragraphs(open(args.car_cbor, 'rb'))): car_id = \"CAR_\" +", "item in data: idx = topic_number_dict[item['topic_number']] if idx // topic_per_fold", "argparse from trec_car import read_data from tqdm import tqdm import", "idx # e.g. CAR_76a4a716d4b1b01995c6663ee16e94b4ca35fdd3 -> 10000044 car_idx_to_id.append(car_id) f.write(\"{}\\t{}\\n\".format(idx, text)) i", "elif pid.startswith(\"MARCO_\"): pid = int(pid[6:]) else: continue nq.write(qid + \"\\t0\\t\"", "+ str(removed) + \" passages\") print(\"Dumping id mappings to {}", "os.path.exists(out_collection_file) and os.path.exists( car_id_to_idx_file) and os.path.exists(car_idx_to_id_file): print(\"Preprocessed collection found. Loading", "out_raw_queries.close() with open(out_topics_file, 'w') as fout: for item in data:", "as oq, open(out_qrels_file, \"w\") as nq: for line in oq:", "splitted = line.split('\\t') out_manual_queries.write(line) topic_query = splitted[0] query = splitted[1].strip()", "para in tqdm( read_data.iter_paragraphs(open(args.car_cbor, 'rb'))): car_id = \"CAR_\" + para.para_id", "sim_dict = parse_sim_file(sim_file) car_base_id = 10000000 i = 0 with", "= os.path.join(args.out_collection_dir, \"car_idx_to_id.pickle\") out_collection_file = os.path.join(args.out_collection_dir, \"collection.tsv\") # 1. 
Combine", "ids car_id_to_idx = {} car_idx_to_id = [] if os.path.exists(out_collection_file) and", "= all_annonated[topic_number][query_number] out_raw_queries.write(\"{}_{}\\t{}\\n\".format(topic_number, query_number, raw_utterance)) if not topic_number in topic_number_dict:", "data into K-fold topic_per_fold = len(topic_number_dict) // NUM_FOLD for i", "== i: json_str = json.dumps(item) fout.write(json_str + '\\n') # 3.", "utils.util import NUM_FOLD def parse_sim_file(filename): \"\"\" Reads the deduplicated documents", "query = splitted[1].strip() topic_id = topic_query.split('_')[0] query_id = topic_query.split('_')[1] if", "cast_topics_raw_file = os.path.join(args.cast_dir, \"evaluation_topics_v1.0.json\") cast_topics_manual_file = os.path.join( args.cast_dir, \"evaluation_topics_annotated_resolved_v1.0.tsv\") cast_qrels_file", "= len(topic_number_dict) data.append(record) out_raw_queries.close() with open(out_topics_file, 'w') as fout: for", "1. Combine TREC-CAR & MS MARCO, remove duplicate passages, assign", "\"r\") as m: for line in tqdm(m): marco_id, text =", "> 0: sim_docs = data[-1].split(',') for docs in sim_docs: sim_dict[docs]", "'') queries = [] for query in turn: query_number, raw_utterance", "= line.strip().split(':') if len(data[1]) > 0: sim_docs = data[-1].split(',') for", "nq: for line in oq: qid, _, pid, rel =", "pickle import os import json import copy from utils.util import", "#FIX change 'a' to 'w' in normal run print(\"Processing TREC-CAR...\")", "utterances...\") with open(cast_topics_raw_file, \"r\") as fin: raw_data = json.load(fin) with", "in raw_data: topic_number, description, turn, title = str( group['number']), group.get('description',", "str( group['number']), group.get('description', ''), group['turn'], group.get( 'title', '') queries =", "str(removed) + \" passages\") print(\"Dumping id mappings to {} and", "+ \"\\t0\\t\" + str(pid) + \"\\t\" + rel + \"\\n\")", "open(car_id_to_idx_file, \"wb\") as f: pickle.dump(car_id_to_idx, f) with open(car_idx_to_id_file, \"wb\") as", "car_id_to_idx = {} car_idx_to_id = [] if os.path.exists(out_collection_file) and os.path.exists(", "parser.add_argument(\"--out_collection_dir\", type=str) args = parser.parse_args() # INPUT sim_file = args.duplicate_file", "with open(car_idx_to_id_file, \"wb\") as f: pickle.dump(car_idx_to_id, f) # 2. Process", "car_idx_to_id_file)) with open(car_id_to_idx_file, \"wb\") as f: pickle.dump(car_id_to_idx, f) with open(car_idx_to_id_file,", "open(car_idx_to_id_file, \"wb\") as f: pickle.dump(car_idx_to_id, f) # 2. 
Process queries", "from trec_car import read_data from tqdm import tqdm import pickle", "queries print(\"Processing CAsT utterances...\") with open(cast_topics_raw_file, \"r\") as fin: raw_data", "'w' in normal run print(\"Processing TREC-CAR...\") for para in tqdm(", "# INPUT sim_file = args.duplicate_file cast_topics_raw_file = os.path.join(args.cast_dir, \"evaluation_topics_v1.0.json\") cast_topics_manual_file", "for group in raw_data: topic_number, description, turn, title = str(", "for line in lines: data = line.strip().split(':') if len(data[1]) >", "type=str) parser.add_argument(\"--out_collection_dir\", type=str) args = parser.parse_args() # INPUT sim_file =", "'w') as fout: for item in data: idx = topic_number_dict[item['topic_number']]", "para.get_text() text = text.replace(\"\\t\", \" \").replace(\"\\n\", \" \").replace(\"\\r\", \" \")", "sim_docs: sim_dict[docs] = 1 return sim_dict if __name__ == \"__main__\":", "parser.add_argument(\"--duplicate_file\", type=str) parser.add_argument(\"--cast_dir\", type=str) parser.add_argument(\"--out_data_dir\", type=str) parser.add_argument(\"--out_collection_dir\", type=str) args =", "= os.path.join(args.out_data_dir, \"queries.raw.tsv\") out_manual_queries_file = os.path.join(args.out_data_dir, \"queries.manual.tsv\") out_qrels_file = os.path.join(args.out_data_dir,", "10000044 car_idx_to_id.append(car_id) f.write(\"{}\\t{}\\n\".format(idx, text)) i += 1 print(\"Processing MS MARCO...\")", "\"\"\" sim_dict = {} lines = open(filename).readlines() for line in", "topic_query.split('_')[0] query_id = topic_query.split('_')[1] if topic_id not in all_annonated: all_annonated[topic_id]", "os.path.join(args.out_data_dir, \"qrels.tsv\") car_id_to_idx_file = os.path.join(args.out_collection_dir, \"car_id_to_idx.pickle\") car_idx_to_id_file = os.path.join(args.out_collection_dir, \"car_idx_to_id.pickle\")", "car_idx_to_id_file = os.path.join(args.out_collection_dir, \"car_idx_to_id.pickle\") out_collection_file = os.path.join(args.out_collection_dir, \"collection.tsv\") # 1.", "= 0 with open(out_collection_file, \"w\", encoding=\"utf-8\") as f: #FIX change", "parser.add_argument(\"--msmarco_collection\", type=str) parser.add_argument(\"--duplicate_file\", type=str) parser.add_argument(\"--cast_dir\", type=str) parser.add_argument(\"--out_data_dir\", type=str) parser.add_argument(\"--out_collection_dir\", type=str)", "removed = 0 with open(args.msmarco_collection, \"r\") as m: for line", "print(\"Processing MS MARCO...\") removed = 0 with open(args.msmarco_collection, \"r\") as", "type=str) parser.add_argument(\"--cast_dir\", type=str) parser.add_argument(\"--out_data_dir\", type=str) parser.add_argument(\"--out_collection_dir\", type=str) args = parser.parse_args()", "ids into a dictionary \"\"\" sim_dict = {} lines =", "annonated_lines = fin.readlines() out_raw_queries = open(out_raw_queries_file, \"w\") out_manual_queries = open(out_manual_queries_file,", "'title', '') queries = [] for query in turn: query_number,", "Process queries print(\"Processing CAsT utterances...\") with open(cast_topics_raw_file, \"r\") as fin:", "record['target'] = all_annonated[topic_number][query_number] out_raw_queries.write(\"{}_{}\\t{}\\n\".format(topic_number, query_number, raw_utterance)) if not topic_number in", "tqdm import tqdm import pickle import os import json import", "= title record['input'] = copy.deepcopy(queries) record['target'] = all_annonated[topic_number][query_number] out_raw_queries.write(\"{}_{}\\t{}\\n\".format(topic_number, query_number,", 
"duplicate passage ids into a dictionary \"\"\" sim_dict = {}", "removed += 1 continue f.write(\"{}\\t{}\\n\".format(marco_id, text)) print(\"Removed \" + str(removed)", "as fin: raw_data = json.load(fin) with open(cast_topics_manual_file, \"r\") as fin:", "= car_base_id + i car_id_to_idx[ car_id] = idx # e.g.", "query_number, raw_utterance)) if not topic_number in topic_number_dict: topic_number_dict[topic_number] = len(topic_number_dict)", "= open(filename).readlines() for line in lines: data = line.strip().split(':') if", "for line in tqdm(m): marco_id, text = line.strip().split(\"\\t\") if (\"MARCO_\"", "len(data[1]) > 0: sim_docs = data[-1].split(',') for docs in sim_docs:", "sim_docs = data[-1].split(',') for docs in sim_docs: sim_dict[docs] = 1", "if not topic_number in topic_number_dict: topic_number_dict[topic_number] = len(topic_number_dict) data.append(record) out_raw_queries.close()", "\"queries.raw.tsv\") out_manual_queries_file = os.path.join(args.out_data_dir, \"queries.manual.tsv\") out_qrels_file = os.path.join(args.out_data_dir, \"qrels.tsv\") car_id_to_idx_file", "print(\"Preprocessed collection found. Loading car_id_to_idx...\") with open(car_id_to_idx_file, \"rb\") as f:", "queries.append(raw_utterance) record = {} record['topic_number'] = topic_number record['query_number'] = query_number", "os.path.join(args.out_data_dir, \"queries.raw.tsv\") out_manual_queries_file = os.path.join(args.out_data_dir, \"queries.manual.tsv\") out_qrels_file = os.path.join(args.out_data_dir, \"qrels.tsv\")", "the duplicate passage ids into a dictionary \"\"\" sim_dict =", "mappings to {} and {}...\".format(car_id_to_idx_file, car_idx_to_id_file)) with open(car_id_to_idx_file, \"wb\") as", "data = [] for group in raw_data: topic_number, description, turn,", "data: idx = topic_number_dict[item['topic_number']] if idx // topic_per_fold == i:", "# e.g. 
CAR_76a4a716d4b1b01995c6663ee16e94b4ca35fdd3 -> 10000044 car_idx_to_id.append(car_id) f.write(\"{}\\t{}\\n\".format(idx, text)) i +=", "group.get('description', ''), group['turn'], group.get( 'title', '') queries = [] for", "OUTPUT out_topics_file = os.path.join(args.out_data_dir, \"eval_topics.jsonl\") out_raw_queries_file = os.path.join(args.out_data_dir, \"queries.raw.tsv\") out_manual_queries_file", "INPUT sim_file = args.duplicate_file cast_topics_raw_file = os.path.join(args.cast_dir, \"evaluation_topics_v1.0.json\") cast_topics_manual_file =", "# OUTPUT out_topics_file = os.path.join(args.out_data_dir, \"eval_topics.jsonl\") out_raw_queries_file = os.path.join(args.out_data_dir, \"queries.raw.tsv\")", "and convert qrels print(\"Processing qrels...\") with open(cast_qrels_file, \"r\") as oq,", "idx // topic_per_fold == i: json_str = json.dumps(item) fout.write(json_str +", "= fin.readlines() out_raw_queries = open(out_raw_queries_file, \"w\") out_manual_queries = open(out_manual_queries_file, \"w\")", "type=str) args = parser.parse_args() # INPUT sim_file = args.duplicate_file cast_topics_raw_file", "parse_sim_file(sim_file) car_base_id = 10000000 i = 0 with open(out_collection_file, \"w\",", "line.split('\\t') out_manual_queries.write(line) topic_query = splitted[0] query = splitted[1].strip() topic_id =", "!= -1 pid = car_id_to_idx[pid] elif pid.startswith(\"MARCO_\"): pid = int(pid[6:])", "pid, rel = line.strip().split() if pid.startswith(\"CAR_\"): assert car_id_to_idx[pid] != -1", "if topic_id not in all_annonated: all_annonated[topic_id] = {} all_annonated[topic_id][query_id] =", "fin: raw_data = json.load(fin) with open(cast_topics_manual_file, \"r\") as fin: annonated_lines", "{} lines = open(filename).readlines() for line in lines: data =", "in sim_dict: removed += 1 continue f.write(\"{}\\t{}\\n\".format(marco_id, text)) print(\"Removed \"", "= open(out_raw_queries_file, \"w\") out_manual_queries = open(out_manual_queries_file, \"w\") all_annonated = {}", "= {} car_idx_to_id = [] if os.path.exists(out_collection_file) and os.path.exists( car_id_to_idx_file)", "open(out_topics_file + \".\" + str(i), 'w') as fout: for item", "passages\") print(\"Dumping id mappings to {} and {}...\".format(car_id_to_idx_file, car_idx_to_id_file)) with", "out_topics_file = os.path.join(args.out_data_dir, \"eval_topics.jsonl\") out_raw_queries_file = os.path.join(args.out_data_dir, \"queries.raw.tsv\") out_manual_queries_file =", "{} car_idx_to_id = [] if os.path.exists(out_collection_file) and os.path.exists( car_id_to_idx_file) and", "out_manual_queries = open(out_manual_queries_file, \"w\") all_annonated = {} for line in", "as fout: for item in data: idx = topic_number_dict[item['topic_number']] if", "TREC-CAR & MS MARCO, remove duplicate passages, assign new ids", "= os.path.join( args.cast_dir, \"evaluation_topics_annotated_resolved_v1.0.tsv\") cast_qrels_file = os.path.join(args.cast_dir, \"2019qrels.txt\") # OUTPUT", "and os.path.exists(car_idx_to_id_file): print(\"Preprocessed collection found. 
Loading car_id_to_idx...\") with open(car_id_to_idx_file, \"rb\")", "read_data from tqdm import tqdm import pickle import os import", "qrels...\") with open(cast_qrels_file, \"r\") as oq, open(out_qrels_file, \"w\") as nq:", "-> 10000044 car_idx_to_id.append(car_id) f.write(\"{}\\t{}\\n\".format(idx, text)) i += 1 print(\"Processing MS", "parser.add_argument(\"--cast_dir\", type=str) parser.add_argument(\"--out_data_dir\", type=str) parser.add_argument(\"--out_collection_dir\", type=str) args = parser.parse_args() #", "10000000 i = 0 with open(out_collection_file, \"w\", encoding=\"utf-8\") as f:", "f.write(\"{}\\t{}\\n\".format(idx, text)) i += 1 print(\"Processing MS MARCO...\") removed =", "{} record['topic_number'] = topic_number record['query_number'] = query_number record['description'] = description", "{}...\".format(car_id_to_idx_file, car_idx_to_id_file)) with open(car_id_to_idx_file, \"wb\") as f: pickle.dump(car_id_to_idx, f) with", "= os.path.join(args.out_data_dir, \"eval_topics.jsonl\") out_raw_queries_file = os.path.join(args.out_data_dir, \"queries.raw.tsv\") out_manual_queries_file = os.path.join(args.out_data_dir,", "sim_file = args.duplicate_file cast_topics_raw_file = os.path.join(args.cast_dir, \"evaluation_topics_v1.0.json\") cast_topics_manual_file = os.path.join(", "in normal run print(\"Processing TREC-CAR...\") for para in tqdm( read_data.iter_paragraphs(open(args.car_cbor,", "= line.strip().split(\"\\t\") if (\"MARCO_\" + marco_id) in sim_dict: removed +=", "= topic_query.split('_')[0] query_id = topic_query.split('_')[1] if topic_id not in all_annonated:", "text = text.replace(\"\\t\", \" \").replace(\"\\n\", \" \").replace(\"\\r\", \" \") idx", "str( query['number']), query['raw_utterance'] queries.append(raw_utterance) record = {} record['topic_number'] = topic_number", "idx = car_base_id + i car_id_to_idx[ car_id] = idx #", "= splitted[1].strip() topic_id = topic_query.split('_')[0] query_id = topic_query.split('_')[1] if topic_id", "\").replace(\"\\n\", \" \").replace(\"\\r\", \" \") idx = car_base_id + i", "args = parser.parse_args() # INPUT sim_file = args.duplicate_file cast_topics_raw_file =", "= data[-1].split(',') for docs in sim_docs: sim_dict[docs] = 1 return", "as f: #FIX change 'a' to 'w' in normal run", "os.path.join(args.out_data_dir, \"queries.manual.tsv\") out_qrels_file = os.path.join(args.out_data_dir, \"qrels.tsv\") car_id_to_idx_file = os.path.join(args.out_collection_dir, \"car_id_to_idx.pickle\")", "cast_topics_manual_file = os.path.join( args.cast_dir, \"evaluation_topics_annotated_resolved_v1.0.tsv\") cast_qrels_file = os.path.join(args.cast_dir, \"2019qrels.txt\") #", "Split eval data into K-fold topic_per_fold = len(topic_number_dict) // NUM_FOLD", "into a dictionary \"\"\" sim_dict = {} lines = open(filename).readlines()", "+ str(i), 'w') as fout: for item in data: idx", "= json.load(fin) with open(cast_topics_manual_file, \"r\") as fin: annonated_lines = fin.readlines()", "description, turn, title = str( group['number']), group.get('description', ''), group['turn'], group.get(", "rel = line.strip().split() if pid.startswith(\"CAR_\"): assert car_id_to_idx[pid] != -1 pid", "sim_dict[docs] = 1 return sim_dict if __name__ == \"__main__\": parser", "id mappings to {} and {}...\".format(car_id_to_idx_file, car_idx_to_id_file)) with open(car_id_to_idx_file, \"wb\")", "_, pid, rel = line.strip().split() if pid.startswith(\"CAR_\"): assert car_id_to_idx[pid] !=", "in data: json_str = json.dumps(item) fout.write(json_str + '\\n') # Split", 
"\"\\t0\\t\" + str(pid) + \"\\t\" + rel + \"\\n\") print(\"End\")", "# 2. Process queries print(\"Processing CAsT utterances...\") with open(cast_topics_raw_file, \"r\")", "= str( group['number']), group.get('description', ''), group['turn'], group.get( 'title', '') queries", "topic_number record['query_number'] = query_number record['description'] = description record['title'] = title", "found. Loading car_id_to_idx...\") with open(car_id_to_idx_file, \"rb\") as f: car_id_to_idx =", "with open(cast_qrels_file, \"r\") as oq, open(out_qrels_file, \"w\") as nq: for", "f.write(\"{}\\t{}\\n\".format(marco_id, text)) print(\"Removed \" + str(removed) + \" passages\") print(\"Dumping", "raw_data: topic_number, description, turn, title = str( group['number']), group.get('description', ''),", "= query out_manual_queries.close() topic_number_dict = {} data = [] for", "# Split eval data into K-fold topic_per_fold = len(topic_number_dict) //", "in tqdm(m): marco_id, text = line.strip().split(\"\\t\") if (\"MARCO_\" + marco_id)", "in turn: query_number, raw_utterance = str( query['number']), query['raw_utterance'] queries.append(raw_utterance) record", "fout: for item in data: json_str = json.dumps(item) fout.write(json_str +", "os.path.join(args.out_collection_dir, \"car_id_to_idx.pickle\") car_idx_to_id_file = os.path.join(args.out_collection_dir, \"car_idx_to_id.pickle\") out_collection_file = os.path.join(args.out_collection_dir, \"collection.tsv\")", "fout.write(json_str + '\\n') # Split eval data into K-fold topic_per_fold", "= open(out_manual_queries_file, \"w\") all_annonated = {} for line in annonated_lines:", "out_qrels_file = os.path.join(args.out_data_dir, \"qrels.tsv\") car_id_to_idx_file = os.path.join(args.out_collection_dir, \"car_id_to_idx.pickle\") car_idx_to_id_file =", "with open(car_id_to_idx_file, \"wb\") as f: pickle.dump(car_id_to_idx, f) with open(car_idx_to_id_file, \"wb\")", "= {} record['topic_number'] = topic_number record['query_number'] = query_number record['description'] =", "-1 pid = car_id_to_idx[pid] elif pid.startswith(\"MARCO_\"): pid = int(pid[6:]) else:", "Process and convert qrels print(\"Processing qrels...\") with open(cast_qrels_file, \"r\") as", "parse_sim_file(filename): \"\"\" Reads the deduplicated documents file and stores the", "continue nq.write(qid + \"\\t0\\t\" + str(pid) + \"\\t\" + rel", "i = 0 with open(out_collection_file, \"w\", encoding=\"utf-8\") as f: #FIX", "\".\" + str(i), 'w') as fout: for item in data:", "group['turn'], group.get( 'title', '') queries = [] for query in", "MARCO...\") removed = 0 with open(args.msmarco_collection, \"r\") as m: for", "car_id_to_idx = pickle.load(f) else: sim_dict = parse_sim_file(sim_file) car_base_id = 10000000", "car_idx_to_id = [] if os.path.exists(out_collection_file) and os.path.exists( car_id_to_idx_file) and os.path.exists(car_idx_to_id_file):", "= splitted[0] query = splitted[1].strip() topic_id = topic_query.split('_')[0] query_id =", "for item in data: idx = topic_number_dict[item['topic_number']] if idx //", "for item in data: json_str = json.dumps(item) fout.write(json_str + '\\n')", "\"queries.manual.tsv\") out_qrels_file = os.path.join(args.out_data_dir, \"qrels.tsv\") car_id_to_idx_file = os.path.join(args.out_collection_dir, \"car_id_to_idx.pickle\") car_idx_to_id_file", "car_id] = idx # e.g. 
CAR_76a4a716d4b1b01995c6663ee16e94b4ca35fdd3 -> 10000044 car_idx_to_id.append(car_id) f.write(\"{}\\t{}\\n\".format(idx,", "return sim_dict if __name__ == \"__main__\": parser = argparse.ArgumentParser() parser.add_argument(\"--car_cbor\",", "parser.add_argument(\"--car_cbor\", type=str) parser.add_argument(\"--msmarco_collection\", type=str) parser.add_argument(\"--duplicate_file\", type=str) parser.add_argument(\"--cast_dir\", type=str) parser.add_argument(\"--out_data_dir\", type=str)", "1 print(\"Processing MS MARCO...\") removed = 0 with open(args.msmarco_collection, \"r\")", "for i in range(NUM_FOLD): with open(out_topics_file + \".\" + str(i),", "if __name__ == \"__main__\": parser = argparse.ArgumentParser() parser.add_argument(\"--car_cbor\", type=str) parser.add_argument(\"--msmarco_collection\",", "file and stores the duplicate passage ids into a dictionary", "pickle.load(f) else: sim_dict = parse_sim_file(sim_file) car_base_id = 10000000 i =", "splitted[1].strip() topic_id = topic_query.split('_')[0] query_id = topic_query.split('_')[1] if topic_id not", "= line.split('\\t') out_manual_queries.write(line) topic_query = splitted[0] query = splitted[1].strip() topic_id", "group in raw_data: topic_number, description, turn, title = str( group['number']),", "{} for line in annonated_lines: splitted = line.split('\\t') out_manual_queries.write(line) topic_query", "args.duplicate_file cast_topics_raw_file = os.path.join(args.cast_dir, \"evaluation_topics_v1.0.json\") cast_topics_manual_file = os.path.join( args.cast_dir, \"evaluation_topics_annotated_resolved_v1.0.tsv\")", "record['input'] = copy.deepcopy(queries) record['target'] = all_annonated[topic_number][query_number] out_raw_queries.write(\"{}_{}\\t{}\\n\".format(topic_number, query_number, raw_utterance)) if", "else: sim_dict = parse_sim_file(sim_file) car_base_id = 10000000 i = 0", "sim_dict = {} lines = open(filename).readlines() for line in lines:", "query in turn: query_number, raw_utterance = str( query['number']), query['raw_utterance'] queries.append(raw_utterance)", "import tqdm import pickle import os import json import copy", "m: for line in tqdm(m): marco_id, text = line.strip().split(\"\\t\") if", "line in tqdm(m): marco_id, text = line.strip().split(\"\\t\") if (\"MARCO_\" +", "+ \" passages\") print(\"Dumping id mappings to {} and {}...\".format(car_id_to_idx_file,", "# 3. Process and convert qrels print(\"Processing qrels...\") with open(cast_qrels_file,", "copy from utils.util import NUM_FOLD def parse_sim_file(filename): \"\"\" Reads the", "\") idx = car_base_id + i car_id_to_idx[ car_id] = idx", "i car_id_to_idx[ car_id] = idx # e.g. CAR_76a4a716d4b1b01995c6663ee16e94b4ca35fdd3 -> 10000044", "import argparse from trec_car import read_data from tqdm import tqdm", "as m: for line in tqdm(m): marco_id, text = line.strip().split(\"\\t\")", "change 'a' to 'w' in normal run print(\"Processing TREC-CAR...\") for", "pid.startswith(\"MARCO_\"): pid = int(pid[6:]) else: continue nq.write(qid + \"\\t0\\t\" +", "fout: for item in data: idx = topic_number_dict[item['topic_number']] if idx", "i in range(NUM_FOLD): with open(out_topics_file + \".\" + str(i), 'w')", "\"car_idx_to_id.pickle\") out_collection_file = os.path.join(args.out_collection_dir, \"collection.tsv\") # 1. Combine TREC-CAR &", "+ para.para_id text = para.get_text() text = text.replace(\"\\t\", \" \").replace(\"\\n\",", "0: sim_docs = data[-1].split(',') for docs in sim_docs: sim_dict[docs] =", "= json.dumps(item) fout.write(json_str + '\\n') # 3. 
Process and convert", "\"w\") as nq: for line in oq: qid, _, pid,", "car_id_to_idx[pid] elif pid.startswith(\"MARCO_\"): pid = int(pid[6:]) else: continue nq.write(qid +", "car_id_to_idx[ car_id] = idx # e.g. CAR_76a4a716d4b1b01995c6663ee16e94b4ca35fdd3 -> 10000044 car_idx_to_id.append(car_id)", "marco_id, text = line.strip().split(\"\\t\") if (\"MARCO_\" + marco_id) in sim_dict:", "record['query_number'] = query_number record['description'] = description record['title'] = title record['input']", "for line in annonated_lines: splitted = line.split('\\t') out_manual_queries.write(line) topic_query =", "out_raw_queries = open(out_raw_queries_file, \"w\") out_manual_queries = open(out_manual_queries_file, \"w\") all_annonated =", "if (\"MARCO_\" + marco_id) in sim_dict: removed += 1 continue", "& MS MARCO, remove duplicate passages, assign new ids car_id_to_idx", "item in data: json_str = json.dumps(item) fout.write(json_str + '\\n') #", "\" \") idx = car_base_id + i car_id_to_idx[ car_id] =", "os import json import copy from utils.util import NUM_FOLD def", "in data: idx = topic_number_dict[item['topic_number']] if idx // topic_per_fold ==", "= os.path.join(args.cast_dir, \"evaluation_topics_v1.0.json\") cast_topics_manual_file = os.path.join( args.cast_dir, \"evaluation_topics_annotated_resolved_v1.0.tsv\") cast_qrels_file =", "para.para_id text = para.get_text() text = text.replace(\"\\t\", \" \").replace(\"\\n\", \"", "out_manual_queries.close() topic_number_dict = {} data = [] for group in", "into K-fold topic_per_fold = len(topic_number_dict) // NUM_FOLD for i in", "and os.path.exists( car_id_to_idx_file) and os.path.exists(car_idx_to_id_file): print(\"Preprocessed collection found. Loading car_id_to_idx...\")", "+ '\\n') # Split eval data into K-fold topic_per_fold =", "print(\"Processing CAsT utterances...\") with open(cast_topics_raw_file, \"r\") as fin: raw_data =", "= query_number record['description'] = description record['title'] = title record['input'] =", "'\\n') # 3. Process and convert qrels print(\"Processing qrels...\") with", "os.path.join(args.out_collection_dir, \"car_idx_to_id.pickle\") out_collection_file = os.path.join(args.out_collection_dir, \"collection.tsv\") # 1. Combine TREC-CAR", "the deduplicated documents file and stores the duplicate passage ids", "from tqdm import tqdm import pickle import os import json", "''), group['turn'], group.get( 'title', '') queries = [] for query", "{} all_annonated[topic_id][query_id] = query out_manual_queries.close() topic_number_dict = {} data =", "car_id_to_idx[pid] != -1 pid = car_id_to_idx[pid] elif pid.startswith(\"MARCO_\"): pid =", "in oq: qid, _, pid, rel = line.strip().split() if pid.startswith(\"CAR_\"):", "\"wb\") as f: pickle.dump(car_idx_to_id, f) # 2. Process queries print(\"Processing", "== \"__main__\": parser = argparse.ArgumentParser() parser.add_argument(\"--car_cbor\", type=str) parser.add_argument(\"--msmarco_collection\", type=str) parser.add_argument(\"--duplicate_file\",", "data: json_str = json.dumps(item) fout.write(json_str + '\\n') # Split eval", "f) # 2. Process queries print(\"Processing CAsT utterances...\") with open(cast_topics_raw_file,", "if pid.startswith(\"CAR_\"): assert car_id_to_idx[pid] != -1 pid = car_id_to_idx[pid] elif", "import os import json import copy from utils.util import NUM_FOLD", "pickle.dump(car_idx_to_id, f) # 2. 
Process queries print(\"Processing CAsT utterances...\") with", "raw_data = json.load(fin) with open(cast_topics_manual_file, \"r\") as fin: annonated_lines =", "\"evaluation_topics_v1.0.json\") cast_topics_manual_file = os.path.join( args.cast_dir, \"evaluation_topics_annotated_resolved_v1.0.tsv\") cast_qrels_file = os.path.join(args.cast_dir, \"2019qrels.txt\")", "+= 1 continue f.write(\"{}\\t{}\\n\".format(marco_id, text)) print(\"Removed \" + str(removed) +", "remove duplicate passages, assign new ids car_id_to_idx = {} car_idx_to_id", "raw_utterance)) if not topic_number in topic_number_dict: topic_number_dict[topic_number] = len(topic_number_dict) data.append(record)", "// NUM_FOLD for i in range(NUM_FOLD): with open(out_topics_file + \".\"", "NUM_FOLD def parse_sim_file(filename): \"\"\" Reads the deduplicated documents file and", "pickle.dump(car_id_to_idx, f) with open(car_idx_to_id_file, \"wb\") as f: pickle.dump(car_idx_to_id, f) #", "= int(pid[6:]) else: continue nq.write(qid + \"\\t0\\t\" + str(pid) +", "args.cast_dir, \"evaluation_topics_annotated_resolved_v1.0.tsv\") cast_qrels_file = os.path.join(args.cast_dir, \"2019qrels.txt\") # OUTPUT out_topics_file =", "\"w\") all_annonated = {} for line in annonated_lines: splitted =", "for query in turn: query_number, raw_utterance = str( query['number']), query['raw_utterance']", "os.path.exists( car_id_to_idx_file) and os.path.exists(car_idx_to_id_file): print(\"Preprocessed collection found. Loading car_id_to_idx...\") with", "with open(out_topics_file, 'w') as fout: for item in data: json_str", "idx = topic_number_dict[item['topic_number']] if idx // topic_per_fold == i: json_str", "open(filename).readlines() for line in lines: data = line.strip().split(':') if len(data[1])", "with open(out_collection_file, \"w\", encoding=\"utf-8\") as f: #FIX change 'a' to", "open(out_collection_file, \"w\", encoding=\"utf-8\") as f: #FIX change 'a' to 'w'", "if idx // topic_per_fold == i: json_str = json.dumps(item) fout.write(json_str", "in annonated_lines: splitted = line.split('\\t') out_manual_queries.write(line) topic_query = splitted[0] query", "record = {} record['topic_number'] = topic_number record['query_number'] = query_number record['description']", "text)) i += 1 print(\"Processing MS MARCO...\") removed = 0", "and {}...\".format(car_id_to_idx_file, car_idx_to_id_file)) with open(car_id_to_idx_file, \"wb\") as f: pickle.dump(car_id_to_idx, f)", "\"collection.tsv\") # 1. Combine TREC-CAR & MS MARCO, remove duplicate", "\"rb\") as f: car_id_to_idx = pickle.load(f) else: sim_dict = parse_sim_file(sim_file)", "open(out_manual_queries_file, \"w\") all_annonated = {} for line in annonated_lines: splitted", "= line.strip().split() if pid.startswith(\"CAR_\"): assert car_id_to_idx[pid] != -1 pid =", "topic_number_dict = {} data = [] for group in raw_data:", "docs in sim_docs: sim_dict[docs] = 1 return sim_dict if __name__", "= topic_query.split('_')[1] if topic_id not in all_annonated: all_annonated[topic_id] = {}", "+ i car_id_to_idx[ car_id] = idx # e.g. CAR_76a4a716d4b1b01995c6663ee16e94b4ca35fdd3 ->" ]
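The fold split above operates on whole topics, not individual queries: each topic number gets a dense index in order of first appearance, and fold i collects the i-th contiguous block of topic_per_fold indices. Note that with this integer division, any remainder topics (when the topic count is not a multiple of NUM_FOLD) fall outside every fold. A toy illustration with invented topic numbers, fixing NUM_FOLD = 5 purely for the sketch:

NUM_FOLD = 5
topic_numbers = ['31', '32', '33', '34', '35', '36', '37', '38', '39', '40']
topic_number_dict = {t: i for i, t in enumerate(topic_numbers)}

topic_per_fold = len(topic_number_dict) // NUM_FOLD  # 10 // 5 == 2 topics per fold
folds = {i: [t for t, idx in topic_number_dict.items() if idx // topic_per_fold == i]
         for i in range(NUM_FOLD)}
print(folds)  # {0: ['31', '32'], 1: ['33', '34'], ..., 4: ['39', '40']}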
[ "further processing n_stack += 1 sig_start[n_stack - 1] = sig", "http://paulbourke.net/geometry/polyarea/javascript.txt Keyword arguments: poly -- polygon geojson object return polygon", "coordTransform_utils import wgs84togcj02 from coordTransform_utils import gcj02tobd09 def linestrings_intersect(line1, line2):", "inside poly) return true else false \"\"\" coords = [poly['coordinates']]", "base point and a distance Keyword arguments: pt -- polygon", "- source[end][\"lng\"] y23 = source[i][\"lat\"] - source[end][\"lat\"] if math.fabs(x23) >", "= float(dist) / 6371 # convert dist to angular distance", "linestring point if geometry['type'] == 'MultiLineString': coordinates = geometry['coordinates'] for", "vert[i][0]) / (vert[j][0] - vert[i][0]) + vert[i][1]): inside = not", "multipoly): \"\"\" valid whether the point is located in a", "math.asin(math.sin(lat1) * math.cos(dist) + math.cos(lat1) * math.sin(dist) * math.cos(brng)) lon2", "return radius \"\"\" return number * math.pi / 180 def", "[] for i in range(0, n_dest): r.append(source_coord[index[i]]) return map(lambda o:", "polygon is inside a radius around a center Keyword arguments:", "= (b2_y - b1_y) * (a2_x - a1_x) - (b2_x", "/ 2), 2) + math.cos(number2radius(lat1)) * \\ math.cos(number2radius(lat2)) * math.pow(math.sin(deg_lon", "geometry_within_radius(geometry, center, radius): \"\"\" To valid whether point or linestring", "= (lon2 + 3 * math.pi) % (2 * math.pi)", "* x12) * (x13 * y12 - y13 * x12)", "inside a radius around a center Keyword arguments: geometry --", "(d12 + d23): dev_sqr = d23 elif d23 >= (d12", "stack for further processing n_stack += 1 sig_start[n_stack - 1]", "line1['coordinates'][i][1] a1_y = line1['coordinates'][i][0] a2_x = line1['coordinates'][i + 1][1] a2_y", "coord: vert.append(node) vert.append(coord[0]) vert.append([0, 0]) inside = False i =", "line2['coordinates'][j + 1][0] ua_t = (b2_x - b1_x) * (a1_y", "radius return degree \"\"\" return number * 180 / math.pi", "point2): \"\"\" calculate the distance between two point on the", "* f_total y_total += (p1_y + p2_y) * f_total j", "* (source[end][\"lat\"] + source[start][\"lat\"])) # use avg lat to reduce", "while i < len(vert): if ((vert[i][0] > y) != (vert[j][0]", "point['coordinates'][0], coord): inside_poly = True return inside_poly def point_in_polygon(point, poly):", "360.0 - math.fabs(x12) x12 *= math.cos(F * (source[end][\"lat\"] + source[start][\"lat\"]))", "x_total += (p1_x + p2_x) * f_total y_total += (p1_y", "math.atan2(math.sqrt(a), math.sqrt(1 - a)) return (6371 * c) * 1000", "n_dest, start, end, i, sig; # dev_sqr, max_dev_sqr, band_sqr; #", "= (x23 * x23) + (y23 * y23) if d13", "r = [] for i in range(0, n_dest): r.append(source_coord[index[i]]) return", "centroid of polygon adapted from http://paulbourke.net/geometry/polyarea/javascript.txt Keyword arguments: poly --", "kept kink depth is the height of the triangle abc", "0: u_a = ua_t / u_b u_b = ub_t /", "return destination point object \"\"\" dist = float(dist) / 6371", "object if(point inside multipoly) return true else false \"\"\" lon1", "in coordinates: for line in lines: line[0], line[1] = gcj02tobd09(line[0],", "sig else: # ... 
no intermediate points, so transfer current", "object if(point inside multipoly) return true else false \"\"\" coords_array", "math.pi / 180 def number2degree(number): \"\"\" convert radius into degree", "y13, d13, x23, y23, d23; F = (math.pi / 180.0)", "intersects def _bbox_around_polycoords(coords): \"\"\" bounding box \"\"\" x_all = []", "# convert dist to angular distance in radians brng =", "sig_start[n_stack - 1] = start sig_end[n_stack - 1] = sig", "return False inside_poly = False for coord in coords: if", "point -- point geojson object poly -- polygon geojson object", "# check for simple cases count = len(source_coord) if count", "u_a * (a2_x - a1_x), a1_y + u_a * (a2_y", "'coordinates': [ a1_x + u_a * (a2_x - a1_x), a1_y", "transfer current start point index[n_dest] = start n_dest += 1", "+ d13): dev_sqr = d13 else: dev_sqr = (x13 *", "# Now in degrees band_sqr *= band_sqr n_dest = 0", "inside multipoly) return true else false \"\"\" coords_array = [multipoly['coordinates']]", "indexes of source points to include in the reduced line", "To valid whether point or linestring or polygon is inside", "[[0, 0]] for coord in coords: for node in coord:", "return False def number2radius(number): \"\"\" convert degree into radius Keyword", "= False for coord in coords: if inside_poly: break if", "2), 2) c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))", "sig sig_end[n_stack - 1] = end n_stack += 1 sig_start[n_stack", "start = sig_start[n_stack - 1] end = sig_end[n_stack - 1]", "0 <= u_b and u_b <= 1: intersects.append({'type': 'Point', 'coordinates':", "math.fabs(x23) > 180.0: x23 = 360.0 - math.fabs(x23) x23 *=", "/ 180.0) * 0.5 index = [] # aray of", "b1_y) - \\ (a2_y - a1_y) * (a1_x - b1_x)", "line2['coordinates'][j][1] b1_y = line2['coordinates'][j][0] b2_x = line2['coordinates'][j + 1][1] b2_y", "or point['coordinates'][1] > bounds[2] or point['coordinates'][0] < bounds[1] or point['coordinates'][0]", "2]} def point_distance(point1, point2): \"\"\" calculate the distance between two", "from http://paulbourke.net/geometry/polyarea/javascript.txt Keyword arguments: poly -- polygon geojson object return", "lat to reduce lng d12 = (x12 * x12) +", "point2 -- point two geojson object if(point inside multipoly) return", "- a1_x) * (a1_y - b1_y) - \\ (a2_y -", "geojson object multipoly -- multipolygon geojson object if(point inside multipoly)", "sig_start[n_stack - 1] = sig sig_end[n_stack - 1] = end", "intermediate points ? # ... yes, so find most deviant", "= source[end][\"lng\"] - source[start][\"lng\"] y12 = source[end][\"lat\"] - source[start][\"lat\"] if", "of polygon Keyword arguments: poly -- polygon geojson object return", "point return destination point object \"\"\" dist = float(dist) /", "\"\"\" bounding box \"\"\" x_all = [] y_all = []", ">= (d12 + d13): dev_sqr = d13 else: dev_sqr =", "1] n_stack -= 1 if (end - start) > 1:", "center) > radius: return False return True def area(poly): \"\"\"", "# convert meters to radiant rad_center = [number2radius(center[0]), number2radius(center[1])] #", "node in coord: vert.append(node) vert.append(coord[0]) vert.append([0, 0]) inside = False", "> 180.0: x13 = 360.0 - math.fabs(x13) x13 *= math.cos(F", "band_sqr: # is there a sig. intermediate point ? 
#...", "# it's enough to check the exterior ring of the", "<= radius elif geometry['type'] == 'LineString' or geometry['type'] == 'Polygon':", "in coords: for node in coord: vert.append(node) vert.append(coord[0]) vert.append([0, 0])", "point one geojson object point2 -- point two geojson object", "def point_distance(point1, point2): \"\"\" calculate the distance between two point", "i, sig; # dev_sqr, max_dev_sqr, band_sqr; # x12, y12, d12,", "1 # make return array r = [] for i", "(x13 * y12 - y13 * x12) / d12 #", "source[] array of geojson points kink in metres, kinks above", "u_b = (b2_y - b1_y) * (a2_x - a1_x) -", "inside_box: break if _point_in_bbox(point, _bbox_around_polycoords(coord)): inside_box = True if not", "return inside def _point_in_polygon(point, coords): inside_box = False for coord", "base point return destination point object \"\"\" dist = float(dist)", "linestrings from geojson are intersected with each other. reference: http://www.kevlindev.com/gui/math/intersection/Intersection.js", "def rectangle_centroid(rectangle): \"\"\" get the centroid of the rectangle Keyword", "xwidth / 2, ymin + ywidth / 2]} def point_distance(point1,", "arguments: point1 -- point one geojson object point2 -- point", "calculate the distance between two point on the sphere like", "line geojson object if(line1 intersects with other) return intersect point", "gcj02tobd09 def linestrings_intersect(line1, line2): \"\"\" To valid whether linestrings from", "* math.pi / 180 def number2degree(number): \"\"\" convert radius into", "1: intersects.append({'type': 'Point', 'coordinates': [ a1_x + u_a * (a2_x", "* math.sin(lat2)) lon2 = (lon2 + 3 * math.pi) %", "+ math.cos(rad_center[0]) * math.sin(dist) * math.cos(brng)) lng = rad_center[1] +", "... yes, so find most deviant intermediate point to either", "-- point geojson object multipoly -- multipolygon geojson object if(point", "> bounds[3]) def _pnpoly(x, y, coords): \"\"\" the algorithm to", "sig = start max_dev_sqr = -1.0 while i < end:", "1] = sig else: # ... no intermediate points, so", "point/linstring/polygon geojson object center -- point geojson object radius --", "start + 1 sig = start max_dev_sqr = -1.0 while", "def centroid(poly): \"\"\" get the centroid of polygon adapted from", "u_a * (a2_y - a1_y)]}) # if len(intersects) == 0:", "* (a1_x - b1_x) ub_t = (a2_x - a1_x) *", "Keyword arguments: point -- point geojson object poly -- polygon", "\"\"\" convert wgs84 to gcj referencing by https://github.com/wandergis/coordTransform_py \"\"\" #", "centroid(poly): \"\"\" get the centroid of polygon adapted from http://paulbourke.net/geometry/polyarea/javascript.txt", "convert dist to angular distance in radians brng = number2radius(brng)", "for first in coords[0]: x_all.append(first[1]) y_all.append(first[0]) return [min(x_all), min(y_all), max(x_all),", "= sig else: # ... 
no intermediate points, so transfer", "(vert[j][0] > y)) and (x < (vert[j][1] - vert[i][1]) *", "point is located in polygon reference: https://www.ecse.rpi.edu/~wrf/Research/Short_Notes/pnpoly.html#Explanation \"\"\" vert =", "- y13 * x12) * (x13 * y12 - y13", "other) return intersect point array else empty array \"\"\" intersects", "bounds): \"\"\" valid whether the point is inside the bounding", "\"\"\" get the centroid of the rectangle Keyword arguments: rectangle", "/ (vert[j][0] - vert[i][0]) + vert[i][1]): inside = not inside", "{\"lng\": o.coordinates[0], \"lat\": o.coordinates[1]}, source) # count, n_stack, n_dest, start,", "* math.pi * step / steps lat = math.asin(math.sin(rad_center[0]) *", "* 360.0 / (2.0 * math.pi * 6378137.0) # Now", "> y)) and (x < (vert[j][1] - vert[i][1]) * (y", "inside_box: return False inside_poly = False for coord in coords:", "0 sig_start[0] = 0 sig_end[0] = count - 1 n_stack", "True def area(poly): \"\"\" calculate the area of polygon Keyword", "polygon reference: https://www.ecse.rpi.edu/~wrf/Research/Short_Notes/pnpoly.html#Explanation \"\"\" vert = [[0, 0]] for coord", "valid whether the point is inside the bounding box \"\"\"", "object return polygon centroid \"\"\" f_total = 0 x_total =", "15 center = [center_point['coordinates'][1], center_point['coordinates'][0]] dist = (radius_in_meters / 1000)", "15 else 15 center = [center_point['coordinates'][1], center_point['coordinates'][0]] dist = (radius_in_meters", "= points[i][0] p2_x = points[j][1] p2_y = points[j][0] f_total =", "\"\"\" calculate the area of polygon Keyword arguments: poly --", "p2_x * p1_y x_total += (p1_x + p2_x) * f_total", "start, end, i, sig; # dev_sqr, max_dev_sqr, band_sqr; # x12,", "source[end][\"lat\"])) d23 = (x23 * x23) + (y23 * y23)", "* (a2_x - a1_x) - (b2_x - b1_x) * (a2_y", "poly[ 'type'] == 'Polygon' else poly['coordinates'] return _point_in_polygon(point, coords) def", "= math.asin(math.sin(lat1) * math.cos(dist) + math.cos(lat1) * math.sin(dist) * math.cos(brng))", "kink in metres, kinks above this depth kept kink depth", "(a2_y - a1_y) * (a1_x - b1_x) u_b = (b2_y", "def area(poly): \"\"\" calculate the area of polygon Keyword arguments:", "x_total / six_area]} def destination_point(point, brng, dist): \"\"\" Calculate a", "- b1_x) u_b = (b2_y - b1_y) * (a2_x -", "true else false \"\"\" coords_array = [multipoly['coordinates']] if multipoly[ 'type']", "= (radius_in_meters / 1000) / 6371 # convert meters to", "source[i][\"lng\"] - source[start][\"lng\"] y13 = source[i][\"lat\"] - source[start][\"lat\"] if math.fabs(x13)", "the sphere like google map reference http://www.movable-type.co.uk/scripts/latlong.html Keyword arguments: point1", "in range(0, count): p1_x = points[i][1] p1_y = points[i][0] p2_x", "math.pi # normalise to -180 degree +180 degree return {'type':", "line[0], line[1] = wgs84togcj02(line[0], line[1]) return geometry def gcj2bd(geometry): \"\"\"", "coords_array = [multipoly['coordinates']] if multipoly[ 'type'] == \"MultiPolygon\" else multipoly['coordinates']", "point index[n_dest] = start n_dest += 1 # transfer last", "not inside_box: return False inside_poly = False for coord in", "== 'Point': return point_distance(geometry, center) <= radius elif geometry['type'] ==", "in degrees dist -- distance in Kilometer between destination and", "max(y_all)] def _point_in_bbox(point, bounds): \"\"\" valid whether the point is", "* y12 - y13 * x12) / d12 # solve", "point['coordinates'][1] > bounds[2] or point['coordinates'][0] < bounds[1] or 
point['coordinates'][0] >", "object multipoly -- multipolygon geojson object if(point inside multipoly) return", "a)) return (6371 * c) * 1000 def geometry_within_radius(geometry, center,", "point geojson object multipoly -- multipolygon geojson object if(point inside", "six_area, x_total / six_area]} def destination_point(point, brng, dist): \"\"\" Calculate", "sig = i max_dev_sqr = dev_sqr i += 1 if", "entries off the stacks start = sig_start[n_stack - 1] end", "source[end][\"lat\"] - source[start][\"lat\"] if math.fabs(x12) > 180.0: x12 = 360.0", "1][1] b2_y = line2['coordinates'][j + 1][0] ua_t = (b2_x -", "poly_area -= p1_y * p2_x j = i poly_area /=", "math.sin(lat1) * math.sin(lat2)) lon2 = (lon2 + 3 * math.pi)", "\"\"\" To valid whether point or linestring or polygon is", "\"\"\" get the centroid of polygon adapted from http://paulbourke.net/geometry/polyarea/javascript.txt Keyword", "point to either side of line joining start & end", "+ (y13 * y13) x23 = source[i][\"lng\"] - source[end][\"lng\"] y23", "reduced line sig_start = [] # indices of start &", "= coordinate if point_distance(point, center) > radius: return False return", "reduce lng d12 = (x12 * x12) + (y12 *", "* 1000 def geometry_within_radius(geometry, center, radius): \"\"\" To valid whether", "% (2 * math.pi) - math.pi # normalise to -180", "- 1] n_stack -= 1 if (end - start) >", "_point_in_polygon(point, coords): inside_box = False for coord in coords: if", "== 'LineString' or geometry['type'] == 'Polygon': point = {} #", "# indices of start & end of working section sig_end", "/ d12 # solve triangle if dev_sqr > max_dev_sqr: sig", "points[j][1] p2_y = points[j][0] f_total = p1_x * p2_y -", "xmax = bbox[2][0] ymax = bbox[2][1] xwidth = xmax -", "point['coordinates'][0] > bounds[3]) def _pnpoly(x, y, coords): \"\"\" the algorithm", "and u_b <= 1: intersects.append({'type': 'Point', 'coordinates': [ a1_x +", "* (a2_y - a1_y) if not u_b == 0: u_a", "geojson object return polygon area \"\"\" poly_area = 0 #", "* math.pow(math.sin(deg_lon / 2), 2) c = 2 * math.atan2(math.sqrt(a),", "* p1_y x_total += (p1_x + p2_x) * f_total y_total", "TODO: point linestring point if geometry['type'] == 'MultiLineString': coordinates =", "to judge whether the point is located in polygon reference:", "math.sin(lat)) poly.append([number2degree(lng), number2degree(lat)]) return {\"type\": \"Polygon\", \"coordinates\": [poly]} def rectangle_centroid(rectangle):", "math.sin(dist) * math.cos(lat1), math.cos(dist) - math.sin(lat1) * math.sin(lat2)) lon2 =", "start max_dev_sqr = -1.0 while i < end: x13 =", "= [] y_all = [] for first in coords[0]: x_all.append(first[1])", "- source[start][\"lat\"] if math.fabs(x13) > 180.0: x13 = 360.0 -", "x13 = source[i][\"lng\"] - source[start][\"lng\"] y13 = source[i][\"lat\"] - source[start][\"lat\"]", "degree Keyword arguments: number -- radius return degree \"\"\" return", "+ xwidth / 2, ymin + ywidth / 2]} def", "* math.pi * 6378137.0) # Now in degrees band_sqr *=", "band_sqr n_dest = 0 sig_start[0] = 0 sig_end[0] = count", "if poly[ 'type'] == 'Polygon' else poly['coordinates'] return _point_in_polygon(point, coords)", "1 and 0 <= u_b and u_b <= 1: intersects.append({'type':", "180.0) * 0.5 index = [] # aray of indexes", "< len(vert): if ((vert[i][0] > y) != (vert[j][0] > y))", "\"\"\" To valid whether linestrings from geojson are intersected with", "end = sig_end[n_stack - 1] n_stack -= 1 if (end", "(vert[j][0] - vert[i][0]) + vert[i][1]): inside = not inside j", "radius Keyword arguments: number -- degree 
return radius \"\"\" return", "bbox[0][0] ymin = bbox[0][1] xmax = bbox[2][0] ymax = bbox[2][1]", "- a1_y) if not u_b == 0: u_a = ua_t", "line1['coordinates'][i + 1][1] a2_y = line1['coordinates'][i + 1][0] b1_x =", "def _point_in_polygon(point, coords): inside_box = False for coord in coords:", "geometry['coordinates'] for coordinate in coordinates: point['coordinates'] = coordinate if point_distance(point,", "_bbox_around_polycoords(coord)): inside_box = True if not inside_box: return False inside_poly", "make return array r = [] for i in range(0,", "(donut polygon is not supported) Keyword arguments: point -- point", "destination Point base on a base point and a distance", "end, i, sig; # dev_sqr, max_dev_sqr, band_sqr; # x12, y12,", "#any intermediate points ? # ... yes, so find most", "+ d23): dev_sqr = d23 elif d23 >= (d12 +", "sig_start[n_stack - 1] end = sig_end[n_stack - 1] n_stack -=", "* (a2_x - a1_x), a1_y + u_a * (a2_y -", "2) c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a)) return", "i in range(0, count): p1_x = points[i][1] p1_y = points[i][0]", "break if _pnpoly(point['coordinates'][1], point['coordinates'][0], coord): inside_poly = True return inside_poly", "polygon centroid \"\"\" f_total = 0 x_total = 0 y_total", "< 3: return source_coord # one or two points #", "= True return inside_poly def point_in_polygon(point, poly): \"\"\" valid whether", "y23) if d13 >= (d12 + d23): dev_sqr = d23", "else geometry['coordinates'] for coordinate in coordinates: point['coordinates'] = coordinate if", "math.sin(lat2)) lon2 = (lon2 + 3 * math.pi) % (2", "15 sided circle poly = [] for step in range(0,", "in range(0, n_dest): r.append(source_coord[index[i]]) return map(lambda o: {\"type\": \"Point\",\"coordinates\": [o.lng,", "* (source[i][\"lat\"] + source[end][\"lat\"])) d23 = (x23 * x23) +", "points[j][1] p2_y = points[j][0] poly_area += p1_x * p2_y poly_area", "the centroid of polygon adapted from http://paulbourke.net/geometry/polyarea/javascript.txt Keyword arguments: poly", "= lon1 + math.atan2(math.sin(brng) * math.sin(dist) * math.cos(lat1), math.cos(dist) -", "polygon is not supported) Keyword arguments: point -- point geojson", "[min(x_all), min(y_all), max(x_all), max(y_all)] def _point_in_bbox(point, bounds): \"\"\" valid whether", "1] = start sig_end[n_stack - 1] = sig else: #", "vert[i][0]) + vert[i][1]): inside = not inside j = i", "* math.sin(dist) * math.cos(lat1), math.cos(dist) - math.sin(lat1) * math.sin(lat2)) lon2", "a radius around a center Keyword arguments: geometry -- point/linstring/polygon", "to bd referencing by https://github.com/wandergis/coordTransform_py \"\"\" # TODO: point linestring", "- ymin return {'type': 'Point', 'coordinates': [xmin + xwidth /", "= bbox[0][1] xmax = bbox[2][0] ymax = bbox[2][1] xwidth =", "* math.cos(lat1), math.cos(dist) - math.sin(lat1) * math.sin(lat2)) lon2 = (lon2", "i += 1 if max_dev_sqr < band_sqr: # is there", "i < end: x13 = source[i][\"lng\"] - source[start][\"lng\"] y13 =", "Keyword arguments: poly -- polygon geojson object return polygon centroid", "= point2['coordinates'][0] lat2 = point2['coordinates'][1] deg_lat = number2radius(lat2 - lat1)", "= bbox[2][1] xwidth = xmax - xmin ywidth = ymax", "= i six_area = area(poly) * 6 return {'type': 'Point',", "- source[start][\"lng\"] y12 = source[end][\"lat\"] - source[start][\"lat\"] if math.fabs(x12) >", "-- polygon geojson object return polygon centroid \"\"\" f_total =", "linestrings_intersect(line1, line2): \"\"\" To valid whether linestrings from geojson are", "d23 
elif d23 >= (d12 + d13): dev_sqr = d13", "there a sig. intermediate point ? #... no, so transfer", "* x13) + (y13 * y13) x23 = source[i][\"lng\"] -", "source points to include in the reduced line sig_start =", "if max_dev_sqr < band_sqr: # is there a sig. intermediate", "= 2 * math.pi * step / steps lat =", "calculate the area of polygon Keyword arguments: poly -- polygon", "dist = float(dist) / 6371 # convert dist to angular", "0.5 index = [] # aray of indexes of source", "intersects with other) return intersect point array else empty array", "centroid \"\"\" bbox = rectangle['coordinates'][0] xmin = bbox[0][0] ymin =", "- vert[i][0]) / (vert[j][0] - vert[i][0]) + vert[i][1]): inside =", "gcj2bd(geometry): \"\"\" convert gcj to bd referencing by https://github.com/wandergis/coordTransform_py \"\"\"", "-- polygon geojson object return centroid \"\"\" bbox = rectangle['coordinates'][0]", "y12, d12, x13, y13, d13, x23, y23, d23; F =", "count): p1_x = points[i][1] p1_y = points[i][0] p2_x = points[j][1]", "coords = [poly['coordinates']] if poly[ 'type'] == 'Polygon' else poly['coordinates']", "i poly_area /= 2 return poly_area def centroid(poly): \"\"\" get", "p2_y poly_area -= p1_y * p2_x j = i poly_area", "false \"\"\" steps = steps if steps > 15 else", "referencing by https://github.com/wandergis/coordTransform_py \"\"\" # TODO: point linestring point if", "- 1): a1_x = line1['coordinates'][i][1] a1_y = line1['coordinates'][i][0] a2_x =", "center -- point geojson object radius -- radius if(geometry inside", "-- multipolygon geojson object if(point inside multipoly) return true else", "\"Polygon\", \"coordinates\": [poly]} def rectangle_centroid(rectangle): \"\"\" get the centroid of", "= points[j][0] poly_area += p1_x * p2_y poly_area -= p1_y", "# normalise to -180 degree +180 degree return {'type': 'Point',", "from coordTransform_utils import wgs84togcj02 from coordTransform_utils import gcj02tobd09 def linestrings_intersect(line1,", "either side of line joining start & end points x12", "max_dev_sqr, band_sqr; # x12, y12, d12, x13, y13, d13, x23,", "-- polygon geojson object brng -- an angle in degrees", "radius): \"\"\" To valid whether point or linestring or polygon", "= d13 else: dev_sqr = (x13 * y12 - y13", "check for simple cases count = len(source_coord) if count <", "inside_poly def point_in_polygon(point, poly): \"\"\" valid whether the point is", "[] # check for simple cases count = len(source_coord) if", "true else false \"\"\" if geometry['type'] == 'Point': return point_distance(geometry,", "a1_y) * (a1_x - b1_x) u_b = (b2_y - b1_y)", "== 'Polygon' else geometry['coordinates'] for coordinate in coordinates: point['coordinates'] =", "[] for first in coords[0]: x_all.append(first[1]) y_all.append(first[0]) return [min(x_all), min(y_all),", "+ source[start][\"lat\"])) # use avg lat to reduce lng d12", "# transfer last point index[n_dest] = count - 1 n_dest", "dist = (radius_in_meters / 1000) / 6371 # convert meters", "r.append(source_coord[index[i]]) return map(lambda o: {\"type\": \"Point\",\"coordinates\": [o.lng, o.lat]}, r) def", "between two point on the sphere like google map reference", "number2degree(lat2)]} def simplify(source, kink=20): \"\"\" source[] array of geojson points", "== \"MultiPolygon\" else multipoly['coordinates'] for coords in coords_array: if _point_in_polygon(point,", "\"coordinates\": [poly]} def rectangle_centroid(rectangle): \"\"\" get the centroid of the", "(a2_x - a1_x) - (b2_x - b1_x) * (a2_y -", "radius: return False return True def area(poly): 
\"\"\" calculate the", "working section sig_end = [] # check for simple cases", "radius elif geometry['type'] == 'LineString' or geometry['type'] == 'Polygon': point", "points[j][0] f_total = p1_x * p2_y - p2_x * p1_y", "or point['coordinates'][0] < bounds[1] or point['coordinates'][0] > bounds[3]) def _pnpoly(x,", "Point base on a base point and a distance Keyword", "return number * math.pi / 180 def number2degree(number): \"\"\" convert", "the point is located in a polygon Keyword arguments: point", "\"\"\" # TODO: point linestring point if geometry['type'] == 'MultiLineString':", "the height of the triangle abc where a-b and b-c", "(a1_x - b1_x) ub_t = (a2_x - a1_x) * (a1_y", "_point_in_polygon(point, coords): return True return False def number2radius(number): \"\"\" convert", "TODO: polygon holes at coordinates[1] points = poly['coordinates'][0] j =", "= (x12 * x12) + (y12 * y12) i =", "(y - vert[i][0]) / (vert[j][0] - vert[i][0]) + vert[i][1]): inside", "* c) * 1000 def geometry_within_radius(geometry, center, radius): \"\"\" To", "*= math.cos(F * (source[end][\"lat\"] + source[start][\"lat\"])) # use avg lat", "centroid \"\"\" f_total = 0 x_total = 0 y_total =", "a1_x = line1['coordinates'][i][1] a1_y = line1['coordinates'][i][0] a2_x = line1['coordinates'][i +", "len(points) - 1 count = len(points) for i in range(0,", "- math.pi # normalise to -180 degree +180 degree return", "lon1 = point1['coordinates'][0] lat1 = point1['coordinates'][1] lon2 = point2['coordinates'][0] lat2", "object return polygon area \"\"\" poly_area = 0 # TODO:", "= line1['coordinates'][i][1] a1_y = line1['coordinates'][i][0] a2_x = line1['coordinates'][i + 1][1]", "a center Keyword arguments: geometry -- point/linstring/polygon geojson object center", "'coordinates': [xmin + xwidth / 2, ymin + ywidth /", "geojson object line2 -- second line geojson object if(line1 intersects", "is not supported) Keyword arguments: point -- point geojson object", "points = poly['coordinates'][0] j = len(points) - 1 count =", "1 n_stack = 1 # while the stack is not", "geojson object return polygon centroid \"\"\" f_total = 0 x_total", "/ six_area]} def destination_point(point, brng, dist): \"\"\" Calculate a destination", "coord): inside_poly = True return inside_poly def point_in_polygon(point, poly): \"\"\"", "- source[start][\"lng\"] y13 = source[i][\"lat\"] - source[start][\"lat\"] if math.fabs(x13) >", "return point_distance(geometry, center) <= radius elif geometry['type'] == 'LineString' or", "[xmin + xwidth / 2, ymin + ywidth / 2]}", "for i in range(0, count): p1_x = points[i][1] p1_y =", "0 # TODO: polygon holes at coordinates[1] points = poly['coordinates'][0]", "base on a base point and a distance Keyword arguments:", "> 1: #any intermediate points ? # ... yes, so", "* p2_y poly_area -= p1_y * p2_x j = i", "= 360.0 - math.fabs(x23) x23 *= math.cos(F * (source[i][\"lat\"] +", "\"lat\": o.coordinates[1]}, source) # count, n_stack, n_dest, start, end, i,", "intersect point array else empty array \"\"\" intersects = []", "brng -- an angle in degrees dist -- distance in", "d13 else: dev_sqr = (x13 * y12 - y13 *", "#... no, so transfer current start point index[n_dest] = start", "a sig. intermediate point ? #... 
no, so transfer current", "the Polygon coordinates = geometry['coordinates'][0] if geometry['type'] == 'Polygon' else", "= number2radius(point['coordinates'][0]) lat1 = number2radius(point['coordinates'][1]) lat2 = math.asin(math.sin(lat1) * math.cos(dist)", "source[i][\"lat\"] - source[end][\"lat\"] if math.fabs(x23) > 180.0: x23 = 360.0", "multipoly[ 'type'] == \"MultiPolygon\" else multipoly['coordinates'] for coords in coords_array:", "or two points # more complex case. initialize stack band_sqr", "= [center_point['coordinates'][1], center_point['coordinates'][0]] dist = (radius_in_meters / 1000) / 6371", "-- point one geojson object point2 -- point two geojson", "# more complex case. initialize stack band_sqr = kink *", "len(intersects) == 0: # intersects = False return intersects def", "point -- point geojson object multipoly -- multipolygon geojson object", "x13, y13, d13, x23, y23, d23; F = (math.pi /", "= not inside j = i i += 1 return", "array \"\"\" intersects = [] for i in range(0, len(line1['coordinates'])", "x23 *= math.cos(F * (source[i][\"lat\"] + source[end][\"lat\"])) d23 = (x23", "'Point', 'coordinates': [y_total / six_area, x_total / six_area]} def destination_point(point,", "= (x13 * x13) + (y13 * y13) x23 =", "of line joining start & end points x12 = source[end][\"lng\"]", "number2degree(number): \"\"\" convert radius into degree Keyword arguments: number --", "= rad_center[1] + math.atan2(math.sin(brng) * math.sin(dist) * math.cos(rad_center[0]), math.cos(dist) -", "- vert[i][1]) * (y - vert[i][0]) / (vert[j][0] - vert[i][0])", "bbox[0][1] xmax = bbox[2][0] ymax = bbox[2][1] xwidth = xmax", "coord in coords: if inside_poly: break if _pnpoly(point['coordinates'][1], point['coordinates'][0], coord):", "p2_x = points[j][1] p2_y = points[j][0] f_total = p1_x *", "https://github.com/wandergis/coordTransform_py \"\"\" # TODO: point linestring point if geometry['type'] ==", "i i += 1 return inside def _point_in_polygon(point, coords): inside_box", "[] y_all = [] for first in coords[0]: x_all.append(first[1]) y_all.append(first[0])", "-- polygon geojson object if(point inside poly) return true else", "len(vert) - 1 while i < len(vert): if ((vert[i][0] >", "= i poly_area /= 2 return poly_area def centroid(poly): \"\"\"", "y23, d23; F = (math.pi / 180.0) * 0.5 index", "c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a)) return (6371", "lng = rad_center[1] + math.atan2(math.sin(brng) * math.sin(dist) * math.cos(rad_center[0]), math.cos(dist)", "[ a1_x + u_a * (a2_x - a1_x), a1_y +", "\"\"\" return not(point['coordinates'][1] < bounds[0] or point['coordinates'][1] > bounds[2] or", "http://www.kevlindev.com/gui/math/intersection/Intersection.js Keyword arguments: line1 -- first line geojson object line2", "point ? #... 
no, so transfer current start point index[n_dest]", "arguments: number -- degree return radius \"\"\" return number *", "y_all = [] for first in coords[0]: x_all.append(first[1]) y_all.append(first[0]) return", "\"\"\" return number * 180 / math.pi def draw_circle(radius_in_meters, center_point,", "the rectangle Keyword arguments: rectangle -- polygon geojson object return", "triangle abc where a-b and b-c are two consecutive line", "* math.sin(dist) * math.cos(rad_center[0]), math.cos(dist) - math.sin(rad_center[0]) * math.sin(lat)) poly.append([number2degree(lng),", "start & end of working section sig_end = [] #", "map(lambda o: {\"lng\": o.coordinates[0], \"lat\": o.coordinates[1]}, source) # count, n_stack,", "is the height of the triangle abc where a-b and", "== 'Polygon' else poly['coordinates'] return _point_in_polygon(point, coords) def point_in_multipolygon(point, multipoly):", "points x12 = source[end][\"lng\"] - source[start][\"lng\"] y12 = source[end][\"lat\"] -", "in coords: if inside_poly: break if _pnpoly(point['coordinates'][1], point['coordinates'][0], coord): inside_poly", "area \"\"\" poly_area = 0 # TODO: polygon holes at", "xmin ywidth = ymax - ymin return {'type': 'Point', 'coordinates':", "u_a = ua_t / u_b u_b = ub_t / u_b", "valid whether the point is located in a mulitpolygon (donut", "arguments: point -- point geojson object poly -- polygon geojson", "aray of indexes of source points to include in the", "inside = False i = 0 j = len(vert) -", "points kink in metres, kinks above this depth kept kink", "False i = 0 j = len(vert) - 1 while", "not u_b == 0: u_a = ua_t / u_b u_b", "bbox[2][0] ymax = bbox[2][1] xwidth = xmax - xmin ywidth", "y23 = source[i][\"lat\"] - source[end][\"lat\"] if math.fabs(x23) > 180.0: x23", "not empty while n_stack > 0: # ... pop the", "and base point return destination point object \"\"\" dist =", "x12 = 360.0 - math.fabs(x12) x12 *= math.cos(F * (source[end][\"lat\"]", "1][0] ua_t = (b2_x - b1_x) * (a1_y - b1_y)", "an angle in degrees dist -- distance in Kilometer between", "sig_start[0] = 0 sig_end[0] = count - 1 n_stack =", "math.cos(F * (source[end][\"lat\"] + source[start][\"lat\"])) # use avg lat to", "inside the bounding box \"\"\" return not(point['coordinates'][1] < bounds[0] or", "line segments \"\"\" source_coord = map(lambda o: {\"lng\": o.coordinates[0], \"lat\":", "n_stack = 1 # while the stack is not empty", ">= (d12 + d23): dev_sqr = d23 elif d23 >=", "lon1) a = math.pow(math.sin(deg_lat / 2), 2) + math.cos(number2radius(lat1)) *", "geometry['type'] == 'Polygon': point = {} # it's enough to", "while n_stack > 0: # ... pop the top-most entries", "geometry['type'] == 'Point': return point_distance(geometry, center) <= radius elif geometry['type']", "/ u_b u_b = ub_t / u_b if 0 <=", "a circle shape polygon based on centerPoint and radius Keyword", "# ... 
pop the top-most entries off the stacks start", "for j in range(0, len(line2['coordinates']) - 1): a1_x = line1['coordinates'][i][1]", "x12 = source[end][\"lng\"] - source[start][\"lng\"] y12 = source[end][\"lat\"] - source[start][\"lat\"]", "return polygon centroid \"\"\" f_total = 0 x_total = 0", "source_coord = map(lambda o: {\"lng\": o.coordinates[0], \"lat\": o.coordinates[1]}, source) #", "convert meters to radiant rad_center = [number2radius(center[0]), number2radius(center[1])] # 15", "+ math.atan2(math.sin(brng) * math.sin(dist) * math.cos(lat1), math.cos(dist) - math.sin(lat1) *", "ua_t = (b2_x - b1_x) * (a1_y - b1_y) -", "number2radius(brng) lon1 = number2radius(point['coordinates'][0]) lat1 = number2radius(point['coordinates'][1]) lat2 = math.asin(math.sin(lat1)", "= i max_dev_sqr = dev_sqr i += 1 if max_dev_sqr", "+ p2_y) * f_total j = i six_area = area(poly)", "u_b = ub_t / u_b if 0 <= u_a and", "= len(points) for i in range(0, count): p1_x = points[i][1]", "i in range(0, n_dest): r.append(source_coord[index[i]]) return map(lambda o: {\"type\": \"Point\",\"coordinates\":", "coordinates = geometry['coordinates'] for lines in coordinates: for line in", "for coordinate in coordinates: point['coordinates'] = coordinate if point_distance(point, center)", "* f_total j = i six_area = area(poly) * 6", "x12, y12, d12, x13, y13, d13, x23, y23, d23; F", "y_all.append(first[0]) return [min(x_all), min(y_all), max(x_all), max(y_all)] def _point_in_bbox(point, bounds): \"\"\"", "rectangle_centroid(rectangle): \"\"\" get the centroid of the rectangle Keyword arguments:", "multipolygon geojson object if(point inside multipoly) return true else false", "last point index[n_dest] = count - 1 n_dest += 1", "= p1_x * p2_y - p2_x * p1_y x_total +=", "* math.cos(brng)) lon2 = lon1 + math.atan2(math.sin(brng) * math.sin(dist) *", "dev_sqr i += 1 if max_dev_sqr < band_sqr: # is", "> 180.0: x23 = 360.0 - math.fabs(x23) x23 *= math.cos(F", "valid whether the point is located in a polygon Keyword", "sig_end[n_stack - 1] n_stack -= 1 if (end - start)", "- 1): for j in range(0, len(line2['coordinates']) - 1): a1_x", "= source[i][\"lat\"] - source[start][\"lat\"] if math.fabs(x13) > 180.0: x13 =", "1][1] a2_y = line1['coordinates'][i + 1][0] b1_x = line2['coordinates'][j][1] b1_y", "return polygon area \"\"\" poly_area = 0 # TODO: polygon", "kink * 360.0 / (2.0 * math.pi * 6378137.0) #", "if d13 >= (d12 + d23): dev_sqr = d23 elif", "n_stack, n_dest, start, end, i, sig; # dev_sqr, max_dev_sqr, band_sqr;", "points # more complex case. 
initialize stack band_sqr = kink", "= [] for first in coords[0]: x_all.append(first[1]) y_all.append(first[0]) return [min(x_all),", "f_total = 0 x_total = 0 y_total = 0 #", "i = start + 1 sig = start max_dev_sqr =", "1 sig_start[n_stack - 1] = sig sig_end[n_stack - 1] =", "1 sig_start[n_stack - 1] = start sig_end[n_stack - 1] =", "* y13) x23 = source[i][\"lng\"] - source[end][\"lng\"] y23 = source[i][\"lat\"]", "(source[i][\"lat\"] + source[end][\"lat\"])) d23 = (x23 * x23) + (y23", "six_area]} def destination_point(point, brng, dist): \"\"\" Calculate a destination Point", "if dev_sqr > max_dev_sqr: sig = i max_dev_sqr = dev_sqr", "Keyword arguments: pt -- polygon geojson object brng -- an", "inside_poly = False for coord in coords: if inside_poly: break", "math.cos(rad_center[0]) * math.sin(dist) * math.cos(brng)) lng = rad_center[1] + math.atan2(math.sin(brng)", "bounds[2] or point['coordinates'][0] < bounds[1] or point['coordinates'][0] > bounds[3]) def", "point or linestring or polygon is inside a radius around", "'Polygon': point = {} # it's enough to check the", "def number2radius(number): \"\"\" convert degree into radius Keyword arguments: number", "polygon Keyword arguments: poly -- polygon geojson object return polygon", "points[j][0] poly_area += p1_x * p2_y poly_area -= p1_y *", "source[i][\"lng\"] - source[end][\"lng\"] y23 = source[i][\"lat\"] - source[end][\"lat\"] if math.fabs(x23)", "geojson points kink in metres, kinks above this depth kept", "+= (p1_x + p2_x) * f_total y_total += (p1_y +", "= 0 j = len(vert) - 1 while i <", "math.fabs(x23) x23 *= math.cos(F * (source[i][\"lat\"] + source[end][\"lat\"])) d23 =", "-= p1_y * p2_x j = i poly_area /= 2", "coordinates = geometry['coordinates'][0] if geometry['type'] == 'Polygon' else geometry['coordinates'] for", "for coord in coords: if inside_poly: break if _pnpoly(point['coordinates'][1], point['coordinates'][0],", "/ steps lat = math.asin(math.sin(rad_center[0]) * math.cos(dist) + math.cos(rad_center[0]) *", "dist -- distance in Kilometer between destination and base point", "= line1['coordinates'][i][0] a2_x = line1['coordinates'][i + 1][1] a2_y = line1['coordinates'][i", "b1_y) * (a2_x - a1_x) - (b2_x - b1_x) *", "bbox = rectangle['coordinates'][0] xmin = bbox[0][0] ymin = bbox[0][1] xmax", "ub_t / u_b if 0 <= u_a and u_a <=", "return true else false \"\"\" coords_array = [multipoly['coordinates']] if multipoly[", "point on the sphere like google map reference http://www.movable-type.co.uk/scripts/latlong.html Keyword", "in degrees band_sqr *= band_sqr n_dest = 0 sig_start[0] =", "math.cos(number2radius(lat2)) * math.pow(math.sin(deg_lon / 2), 2) c = 2 *", "polygon area \"\"\" poly_area = 0 # TODO: polygon holes", "= start + 1 sig = start max_dev_sqr = -1.0", "x13) + (y13 * y13) x23 = source[i][\"lng\"] - source[end][\"lng\"]", "max_dev_sqr: sig = i max_dev_sqr = dev_sqr i += 1", "(a2_x - a1_x), a1_y + u_a * (a2_y - a1_y)]})", "else multipoly['coordinates'] for coords in coords_array: if _point_in_polygon(point, coords): return", "inside_box = True if not inside_box: return False inside_poly =", "the triangle abc where a-b and b-c are two consecutive", "in range(0, len(line2['coordinates']) - 1): a1_x = line1['coordinates'][i][1] a1_y =", "inside multipoly) return true else false \"\"\" lon1 = point1['coordinates'][0]", "of geojson points kink in metres, kinks above this depth", "(x13 * y12 - y13 * x12) * (x13 *", "step / steps lat = math.asin(math.sin(rad_center[0]) * math.cos(dist) + math.cos(rad_center[0])", "in 
coord: vert.append(node) vert.append(coord[0]) vert.append([0, 0]) inside = False i", "p2_y - p2_x * p1_y x_total += (p1_x + p2_x)", "- b1_y) - \\ (b2_y - b1_y) * (a1_x -", "(x23 * x23) + (y23 * y23) if d13 >=", "> bounds[2] or point['coordinates'][0] < bounds[1] or point['coordinates'][0] > bounds[3])", "line1['coordinates'][i + 1][0] b1_x = line2['coordinates'][j][1] b1_y = line2['coordinates'][j][0] b2_x", "else: dev_sqr = (x13 * y12 - y13 * x12)", "bounding box \"\"\" return not(point['coordinates'][1] < bounds[0] or point['coordinates'][1] >", "(math.pi / 180.0) * 0.5 index = [] # aray", "return map(lambda o: {\"type\": \"Point\",\"coordinates\": [o.lng, o.lat]}, r) def wgs2gcj(geometry):", "(d12 + d13): dev_sqr = d13 else: dev_sqr = (x13", "distance Keyword arguments: pt -- polygon geojson object brng --", "+= 1 else: # ... yes, so push two sub-sections", "line[1] = wgs84togcj02(line[0], line[1]) return geometry def gcj2bd(geometry): \"\"\" convert", "= bbox[2][0] ymax = bbox[2][1] xwidth = xmax - xmin", "arguments: rectangle -- polygon geojson object return centroid \"\"\" bbox", "in a polygon Keyword arguments: point -- point geojson object", "y13 * x12) / d12 # solve triangle if dev_sqr", "if geometry['type'] == 'Polygon' else geometry['coordinates'] for coordinate in coordinates:", "* y12) i = start + 1 sig = start", "* (a2_y - a1_y)]}) # if len(intersects) == 0: #", "/ (2.0 * math.pi * 6378137.0) # Now in degrees", "(x < (vert[j][1] - vert[i][1]) * (y - vert[i][0]) /", "+= 1 if max_dev_sqr < band_sqr: # is there a", "math.cos(dist) - math.sin(rad_center[0]) * math.sin(lat)) poly.append([number2degree(lng), number2degree(lat)]) return {\"type\": \"Polygon\",", "== 'Polygon': point = {} # it's enough to check", "so transfer current start point index[n_dest] = start n_dest +=", "- 1] = start sig_end[n_stack - 1] = sig else:", "coords[0]: x_all.append(first[1]) y_all.append(first[0]) return [min(x_all), min(y_all), max(x_all), max(y_all)] def _point_in_bbox(point,", "a2_x = line1['coordinates'][i + 1][1] a2_y = line1['coordinates'][i + 1][0]", "dev_sqr > max_dev_sqr: sig = i max_dev_sqr = dev_sqr i", "/ math.pi def draw_circle(radius_in_meters, center_point, steps=15): \"\"\" get a circle", "coords: if inside_box: break if _point_in_bbox(point, _bbox_around_polycoords(coord)): inside_box = True", "\"\"\" the algorithm to judge whether the point is located", "y, coords): \"\"\" the algorithm to judge whether the point", "polygon geojson object brng -- an angle in degrees dist", "/ 180 def number2degree(number): \"\"\" convert radius into degree Keyword", "180 def number2degree(number): \"\"\" convert radius into degree Keyword arguments:", "= points[i][1] p1_y = points[i][0] p2_x = points[j][1] p2_y =", "range(0, count): p1_x = points[i][1] p1_y = points[i][0] p2_x =", "get a circle shape polygon based on centerPoint and radius", "b1_y) * (a1_x - b1_x) ub_t = (a2_x - a1_x)", "dev_sqr = d13 else: dev_sqr = (x13 * y12 -", "the point is inside the bounding box \"\"\" return not(point['coordinates'][1]", "0]] for coord in coords: for node in coord: vert.append(node)", "sphere like google map reference http://www.movable-type.co.uk/scripts/latlong.html Keyword arguments: point1 --", "source[start][\"lng\"] y13 = source[i][\"lat\"] - source[start][\"lat\"] if math.fabs(x13) > 180.0:", "vert[i][1]) * (y - vert[i][0]) / (vert[j][0] - vert[i][0]) +", "arguments: pt -- polygon geojson object brng -- an angle", "len(line2['coordinates']) - 1): a1_x = line1['coordinates'][i][1] a1_y = 
line1['coordinates'][i][0] a2_x", "u_b == 0: u_a = ua_t / u_b u_b =", "2 * math.atan2(math.sqrt(a), math.sqrt(1 - a)) return (6371 * c)", "* x12) + (y12 * y12) i = start +", "a1_x), a1_y + u_a * (a2_y - a1_y)]}) # if", "point two geojson object if(point inside multipoly) return true else", "bounds[3]) def _pnpoly(x, y, coords): \"\"\" the algorithm to judge", "_point_in_bbox(point, bounds): \"\"\" valid whether the point is inside the", "Keyword arguments: poly -- polygon geojson object return polygon area", "distance in Kilometer between destination and base point return destination", "source[start][\"lat\"])) d13 = (x13 * x13) + (y13 * y13)", "lon2 = point2['coordinates'][0] lat2 = point2['coordinates'][1] deg_lat = number2radius(lat2 -", "+= 1 return inside def _point_in_polygon(point, coords): inside_box = False", "x_all = [] y_all = [] for first in coords[0]:", "2 * math.pi * step / steps lat = math.asin(math.sin(rad_center[0])", "adapted from http://paulbourke.net/geometry/polyarea/javascript.txt Keyword arguments: poly -- polygon geojson object", "2) + math.cos(number2radius(lat1)) * \\ math.cos(number2radius(lat2)) * math.pow(math.sin(deg_lon / 2),", "- 1] = end n_stack += 1 sig_start[n_stack - 1]", "end: x13 = source[i][\"lng\"] - source[start][\"lng\"] y13 = source[i][\"lat\"] -", "two point on the sphere like google map reference http://www.movable-type.co.uk/scripts/latlong.html", "if math.fabs(x23) > 180.0: x23 = 360.0 - math.fabs(x23) x23", "\"\"\" x_all = [] y_all = [] for first in", "360.0 - math.fabs(x23) x23 *= math.cos(F * (source[i][\"lat\"] + source[end][\"lat\"]))", "'Polygon' else geometry['coordinates'] for coordinate in coordinates: point['coordinates'] = coordinate", "to reduce lng d12 = (x12 * x12) + (y12", "push two sub-sections on stack for further processing n_stack +=", "of start & end of working section sig_end = []", "(y13 * y13) x23 = source[i][\"lng\"] - source[end][\"lng\"] y23 =", "in polygon reference: https://www.ecse.rpi.edu/~wrf/Research/Short_Notes/pnpoly.html#Explanation \"\"\" vert = [[0, 0]] for", "False for coord in coords: if inside_box: break if _point_in_bbox(point,", "more complex case. initialize stack band_sqr = kink * 360.0", "6371 # convert meters to radiant rad_center = [number2radius(center[0]), number2radius(center[1])]", "\\ (a2_y - a1_y) * (a1_x - b1_x) u_b =", "stack is not empty while n_stack > 0: # ...", "map(lambda o: {\"type\": \"Point\",\"coordinates\": [o.lng, o.lat]}, r) def wgs2gcj(geometry): \"\"\"", "point['coordinates'] = coordinate if point_distance(point, center) > radius: return False", "# TODO: polygon holes at coordinates[1] points = poly['coordinates'][0] j", "1] end = sig_end[n_stack - 1] n_stack -= 1 if", "= -1.0 while i < end: x13 = source[i][\"lng\"] -", "= start n_dest += 1 else: # ... yes, so", "else: # ... no intermediate points, so transfer current start", "line in lines: line[0], line[1] = gcj02tobd09(line[0], line[1]) return geometry", "== 0: # intersects = False return intersects def _bbox_around_polycoords(coords):", "# dev_sqr, max_dev_sqr, band_sqr; # x12, y12, d12, x13, y13,", "y12) i = start + 1 sig = start max_dev_sqr", "point['coordinates'][0] < bounds[1] or point['coordinates'][0] > bounds[3]) def _pnpoly(x, y,", "False for coord in coords: if inside_poly: break if _pnpoly(point['coordinates'][1],", "count, n_stack, n_dest, start, end, i, sig; # dev_sqr, max_dev_sqr,", "start) > 1: #any intermediate points ? # ... 
yes,", "p1_x * p2_y poly_area -= p1_y * p2_x j =", "\"\"\" intersects = [] for i in range(0, len(line1['coordinates']) -", "-- first line geojson object line2 -- second line geojson", "(b2_y - b1_y) * (a1_x - b1_x) ub_t = (a2_x", "the distance between two point on the sphere like google", "poly): \"\"\" valid whether the point is located in a", "def destination_point(point, brng, dist): \"\"\" Calculate a destination Point base", "false \"\"\" coords = [poly['coordinates']] if poly[ 'type'] == 'Polygon'", "{'type': 'Point', 'coordinates': [xmin + xwidth / 2, ymin +", "return _point_in_polygon(point, coords) def point_in_multipolygon(point, multipoly): \"\"\" valid whether the", "line2): \"\"\" To valid whether linestrings from geojson are intersected", "for step in range(0, steps): brng = 2 * math.pi", "point1 -- point one geojson object point2 -- point two", "object center -- point geojson object radius -- radius if(geometry", "step in range(0, steps): brng = 2 * math.pi *", "+ 1][1] b2_y = line2['coordinates'][j + 1][0] ua_t = (b2_x", "360.0 / (2.0 * math.pi * 6378137.0) # Now in", "\"\"\" dist = float(dist) / 6371 # convert dist to", "coord in coords: for node in coord: vert.append(node) vert.append(coord[0]) vert.append([0,", "n_stack += 1 sig_start[n_stack - 1] = start sig_end[n_stack -", "inside multipoly) return true else false \"\"\" steps = steps", "+ (y23 * y23) if d13 >= (d12 + d23):", "= [multipoly['coordinates']] if multipoly[ 'type'] == \"MultiPolygon\" else multipoly['coordinates'] for", "def point_in_polygon(point, poly): \"\"\" valid whether the point is located", "float(dist) / 6371 # convert dist to angular distance in", "object if(point inside poly) return true else false \"\"\" coords", "x13 = 360.0 - math.fabs(x13) x13 *= math.cos(F * (source[i][\"lat\"]", "radius Keyword arguments: point1 -- point one geojson object point2", "of the Polygon coordinates = geometry['coordinates'][0] if geometry['type'] == 'Polygon'", "yes, so push two sub-sections on stack for further processing", "gcj to bd referencing by https://github.com/wandergis/coordTransform_py \"\"\" # TODO: point", "point2['coordinates'][1] deg_lat = number2radius(lat2 - lat1) deg_lon = number2radius(lon2 -", "a distance Keyword arguments: pt -- polygon geojson object brng", "b1_x) * (a2_y - a1_y) if not u_b == 0:", "geojson are intersected with each other. reference: http://www.kevlindev.com/gui/math/intersection/Intersection.js Keyword arguments:", "* (x13 * y12 - y13 * x12) / d12", "\"\"\" Calculate a destination Point base on a base point", "box \"\"\" return not(point['coordinates'][1] < bounds[0] or point['coordinates'][1] > bounds[2]", "geometry def gcj2bd(geometry): \"\"\" convert gcj to bd referencing by", "j = i i += 1 return inside def _point_in_polygon(point,", "len(line1['coordinates']) - 1): for j in range(0, len(line2['coordinates']) - 1):", "false \"\"\" lon1 = point1['coordinates'][0] lat1 = point1['coordinates'][1] lon2 =", "> 0: # ... pop the top-most entries off the", "= line2['coordinates'][j][1] b1_y = line2['coordinates'][j][0] b2_x = line2['coordinates'][j + 1][1]", "x_total = 0 y_total = 0 # TODO: polygon holes", "center, radius): \"\"\" To valid whether point or linestring or", "... yes, so push two sub-sections on stack for further", "brng = 2 * math.pi * step / steps lat", "u_b if 0 <= u_a and u_a <= 1 and", "n_stack > 0: # ... pop the top-most entries off", "case. 
initialize stack band_sqr = kink * 360.0 / (2.0", "= points[j][0] f_total = p1_x * p2_y - p2_x *", "into degree Keyword arguments: number -- radius return degree \"\"\"", "- \\ (a2_y - a1_y) * (a1_x - b1_x) u_b", "= len(points) - 1 count = len(points) for i in", "in Kilometer between destination and base point return destination point", "bounds[1] or point['coordinates'][0] > bounds[3]) def _pnpoly(x, y, coords): \"\"\"", "or linestring or polygon is inside a radius around a", "normalise to -180 degree +180 degree return {'type': 'Point', 'coordinates':", "inside radius) return true else false \"\"\" if geometry['type'] ==", "or geometry['type'] == 'Polygon': point = {} # it's enough", "if 0 <= u_a and u_a <= 1 and 0", "* p2_x j = i poly_area /= 2 return poly_area", "if geometry['type'] == 'MultiLineString': coordinates = geometry['coordinates'] for lines in", "n_dest += 1 # make return array r = []", "whether point or linestring or polygon is inside a radius", "gcj referencing by https://github.com/wandergis/coordTransform_py \"\"\" # TODO: point linestring point", "reference: https://www.ecse.rpi.edu/~wrf/Research/Short_Notes/pnpoly.html#Explanation \"\"\" vert = [[0, 0]] for coord in", "while the stack is not empty while n_stack > 0:", "u_a <= 1 and 0 <= u_b and u_b <=", "rad_center[1] + math.atan2(math.sin(brng) * math.sin(dist) * math.cos(rad_center[0]), math.cos(dist) - math.sin(rad_center[0])", "= ub_t / u_b if 0 <= u_a and u_a", "in the reduced line sig_start = [] # indices of", "line2 -- second line geojson object if(line1 intersects with other)", "# x12, y12, d12, x13, y13, d13, x23, y23, d23;", "whether the point is located in polygon reference: https://www.ecse.rpi.edu/~wrf/Research/Short_Notes/pnpoly.html#Explanation \"\"\"", "and radius Keyword arguments: point1 -- point one geojson object", "[] # indices of start & end of working section", "* 6 return {'type': 'Point', 'coordinates': [y_total / six_area, x_total", "= xmax - xmin ywidth = ymax - ymin return", "* math.cos(dist) + math.cos(rad_center[0]) * math.sin(dist) * math.cos(brng)) lng =", "to include in the reduced line sig_start = [] #", "coords): inside_box = False for coord in coords: if inside_box:", "(y23 * y23) if d13 >= (d12 + d23): dev_sqr", "/ 6371 # convert dist to angular distance in radians", "start n_dest += 1 # transfer last point index[n_dest] =", "the area of polygon Keyword arguments: poly -- polygon geojson", "+ math.atan2(math.sin(brng) * math.sin(dist) * math.cos(rad_center[0]), math.cos(dist) - math.sin(rad_center[0]) *", "< bounds[0] or point['coordinates'][1] > bounds[2] or point['coordinates'][0] < bounds[1]", "'MultiLineString': coordinates = geometry['coordinates'] for lines in coordinates: for line", "else 15 center = [center_point['coordinates'][1], center_point['coordinates'][0]] dist = (radius_in_meters /", "object \"\"\" dist = float(dist) / 6371 # convert dist", "points[i][1] p1_y = points[i][0] p2_x = points[j][1] p2_y = points[j][0]", "(x12 * x12) + (y12 * y12) i = start", "1] = end n_stack += 1 sig_start[n_stack - 1] =", "bounds[0] or point['coordinates'][1] > bounds[2] or point['coordinates'][0] < bounds[1] or", "b-c are two consecutive line segments \"\"\" source_coord = map(lambda", "6371 # convert dist to angular distance in radians brng", "points to include in the reduced line sig_start = []", "lng d12 = (x12 * x12) + (y12 * y12)", "= geometry['coordinates'] for lines in coordinates: for line in lines:", "= number2radius(brng) lon1 = number2radius(point['coordinates'][0]) lat1 = 
number2radius(point['coordinates'][1]) lat2 =", "-- radius return degree \"\"\" return number * 180 /", "+ 1][1] a2_y = line1['coordinates'][i + 1][0] b1_x = line2['coordinates'][j][1]", "return array r = [] for i in range(0, n_dest):", "math.pi) % (2 * math.pi) - math.pi # normalise to", "= math.pow(math.sin(deg_lat / 2), 2) + math.cos(number2radius(lat1)) * \\ math.cos(number2radius(lat2))", "- b1_y) * (a1_x - b1_x) ub_t = (a2_x -", "range(0, steps): brng = 2 * math.pi * step /", "(x13 * x13) + (y13 * y13) x23 = source[i][\"lng\"]", "return intersect point array else empty array \"\"\" intersects =", "vert = [[0, 0]] for coord in coords: for node", "return {'type': 'Point', 'coordinates': [y_total / six_area, x_total / six_area]}", "2 return poly_area def centroid(poly): \"\"\" get the centroid of", "3: return source_coord # one or two points # more", "y13) x23 = source[i][\"lng\"] - source[end][\"lng\"] y23 = source[i][\"lat\"] -", "+= p1_x * p2_y poly_area -= p1_y * p2_x j", "of the rectangle Keyword arguments: rectangle -- polygon geojson object", "b1_x) * (a1_y - b1_y) - \\ (b2_y - b1_y)", "def geometry_within_radius(geometry, center, radius): \"\"\" To valid whether point or", "= points[i][0] p2_x = points[j][1] p2_y = points[j][0] poly_area +=", "- (b2_x - b1_x) * (a2_y - a1_y) if not", "= [] # indices of start & end of working", "is located in a mulitpolygon (donut polygon is not supported)", "for coord in coords: for node in coord: vert.append(node) vert.append(coord[0])", "* y12 - y13 * x12) * (x13 * y12", "# TODO: point linestring point if geometry['type'] == 'MultiLineString': coordinates", "u_b <= 1: intersects.append({'type': 'Point', 'coordinates': [ a1_x + u_a", "* math.sin(lat)) poly.append([number2degree(lng), number2degree(lat)]) return {\"type\": \"Polygon\", \"coordinates\": [poly]} def", "math.cos(lat1), math.cos(dist) - math.sin(lat1) * math.sin(lat2)) lon2 = (lon2 +", "consecutive line segments \"\"\" source_coord = map(lambda o: {\"lng\": o.coordinates[0],", "pop the top-most entries off the stacks start = sig_start[n_stack", "- math.sin(lat1) * math.sin(lat2)) lon2 = (lon2 + 3 *", "to angular distance in radians brng = number2radius(brng) lon1 =", "and (x < (vert[j][1] - vert[i][1]) * (y - vert[i][0])", "i six_area = area(poly) * 6 return {'type': 'Point', 'coordinates':", "(a1_y - b1_y) - \\ (b2_y - b1_y) * (a1_x", "[] # aray of indexes of source points to include", "triangle if dev_sqr > max_dev_sqr: sig = i max_dev_sqr =", "geojson object point2 -- point two geojson object if(point inside", "steps > 15 else 15 center = [center_point['coordinates'][1], center_point['coordinates'][0]] dist", "+ ywidth / 2]} def point_distance(point1, point2): \"\"\" calculate the", "inside_box = False for coord in coords: if inside_box: break", "_pnpoly(point['coordinates'][1], point['coordinates'][0], coord): inside_poly = True return inside_poly def point_in_polygon(point,", "+180 degree return {'type': 'Point', 'coordinates': [number2degree(lon2), number2degree(lat2)]} def simplify(source,", "- source[end][\"lat\"] if math.fabs(x23) > 180.0: x23 = 360.0 -", "- 1 n_stack = 1 # while the stack is", "with other) return intersect point array else empty array \"\"\"", "located in a polygon Keyword arguments: point -- point geojson", "coords in coords_array: if _point_in_polygon(point, coords): return True return False", "o: {\"type\": \"Point\",\"coordinates\": [o.lng, o.lat]}, r) def wgs2gcj(geometry): \"\"\" convert", "(6371 * c) * 1000 def geometry_within_radius(geometry, 
__doc__ = 'github: https://github.com/brandonxiang/geojson-python-utils'
import math

from coordTransform_utils import wgs84togcj02
from coordTransform_utils import gcj02tobd09


def linestrings_intersect(line1, line2):
    """
    check whether two geojson LineStrings intersect each other
    reference: http://www.kevlindev.com/gui/math/intersection/Intersection.js

    Keyword arguments:
    line1 -- first line geojson object
    line2 -- second line geojson object

    if line1 intersects line2, return the array of intersect points, else an empty array
    """
    intersects = []
    for i in range(0, len(line1['coordinates']) - 1):
        for j in range(0, len(line2['coordinates']) - 1):
            a1_x = line1['coordinates'][i][1]
            a1_y = line1['coordinates'][i][0]
            a2_x = line1['coordinates'][i + 1][1]
            a2_y = line1['coordinates'][i + 1][0]
            b1_x = line2['coordinates'][j][1]
            b1_y = line2['coordinates'][j][0]
            b2_x = line2['coordinates'][j + 1][1]
            b2_y = line2['coordinates'][j + 1][0]
            ua_t = (b2_x - b1_x) * (a1_y - b1_y) - \
                (b2_y - b1_y) * (a1_x - b1_x)
            ub_t = (a2_x - a1_x) * (a1_y - b1_y) - \
                (a2_y - a1_y) * (a1_x - b1_x)
            u_b = (b2_y - b1_y) * (a2_x - a1_x) - \
                (b2_x - b1_x) * (a2_y - a1_y)
            if u_b != 0:
                u_a = ua_t / u_b
                u_b = ub_t / u_b
                if 0 <= u_a <= 1 and 0 <= u_b <= 1:
                    intersects.append({'type': 'Point', 'coordinates': [
                        a1_x + u_a * (a2_x - a1_x),
                        a1_y + u_a * (a2_y - a1_y)]})
    return intersects
"import wgs84togcj02 from coordTransform_utils import gcj02tobd09 def linestrings_intersect(line1, line2): \"\"\"", "radius around a center Keyword arguments: geometry -- point/linstring/polygon geojson", "area(poly): \"\"\" calculate the area of polygon Keyword arguments: poly", "{'type': 'Point', 'coordinates': [y_total / six_area, x_total / six_area]} def", "point linestring point if geometry['type'] == 'MultiLineString': coordinates = geometry['coordinates']", "0: # intersects = False return intersects def _bbox_around_polycoords(coords): \"\"\"", "'Point', 'coordinates': [ a1_x + u_a * (a2_x - a1_x),", "number * 180 / math.pi def draw_circle(radius_in_meters, center_point, steps=15): \"\"\"", "a = math.pow(math.sin(deg_lat / 2), 2) + math.cos(number2radius(lat1)) * \\", "not inside j = i i += 1 return inside", "source[end][\"lng\"] - source[start][\"lng\"] y12 = source[end][\"lat\"] - source[start][\"lat\"] if math.fabs(x12)", "_point_in_bbox(point, _bbox_around_polycoords(coord)): inside_box = True if not inside_box: return False", "current start point index[n_dest] = start n_dest += 1 #", "- 1 count = len(points) for i in range(0, count):", "p2_x = points[j][1] p2_y = points[j][0] poly_area += p1_x *", "use avg lat to reduce lng d12 = (x12 *", "if _pnpoly(point['coordinates'][1], point['coordinates'][0], coord): inside_poly = True return inside_poly def", "if(point inside multipoly) return true else false \"\"\" lon1 =", "find most deviant intermediate point to either side of line", "6 return {'type': 'Point', 'coordinates': [y_total / six_area, x_total /", "deg_lon = number2radius(lon2 - lon1) a = math.pow(math.sin(deg_lat / 2),", "f_total j = i six_area = area(poly) * 6 return", "False return intersects def _bbox_around_polycoords(coords): \"\"\" bounding box \"\"\" x_all", "on the sphere like google map reference http://www.movable-type.co.uk/scripts/latlong.html Keyword arguments:", "return true else false \"\"\" coords = [poly['coordinates']] if poly[", "(b2_x - b1_x) * (a1_y - b1_y) - \\ (b2_y", "point_in_polygon(point, poly): \"\"\" valid whether the point is located in", "# is there a sig. intermediate point ? #... no,", "wgs84togcj02 from coordTransform_utils import gcj02tobd09 def linestrings_intersect(line1, line2): \"\"\" To", "\"\"\" lon1 = point1['coordinates'][0] lat1 = point1['coordinates'][1] lon2 = point2['coordinates'][0]", "\"\"\" convert gcj to bd referencing by https://github.com/wandergis/coordTransform_py \"\"\" #", "- 1] end = sig_end[n_stack - 1] n_stack -= 1", "ub_t = (a2_x - a1_x) * (a1_y - b1_y) -", "# one or two points # more complex case. 
initialize", "object if(point inside multipoly) return true else false \"\"\" steps", "Keyword arguments: number -- radius return degree \"\"\" return number", "j = i six_area = area(poly) * 6 return {'type':", "point1['coordinates'][1] lon2 = point2['coordinates'][0] lat2 = point2['coordinates'][1] deg_lat = number2radius(lat2", "= 360.0 - math.fabs(x13) x13 *= math.cos(F * (source[i][\"lat\"] +", "the stack is not empty while n_stack > 0: #", "/ 2), 2) c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 -", "- 1 n_dest += 1 # make return array r", "1): a1_x = line1['coordinates'][i][1] a1_y = line1['coordinates'][i][0] a2_x = line1['coordinates'][i", "= start max_dev_sqr = -1.0 while i < end: x13", "number2radius(number): \"\"\" convert degree into radius Keyword arguments: number --", "degree into radius Keyword arguments: number -- degree return radius", "supported) Keyword arguments: point -- point geojson object multipoly --", "* math.cos(brng)) lng = rad_center[1] + math.atan2(math.sin(brng) * math.sin(dist) *", "else false \"\"\" steps = steps if steps > 15", "0 j = len(vert) - 1 while i < len(vert):", "lat1) deg_lon = number2radius(lon2 - lon1) a = math.pow(math.sin(deg_lat /", "* math.sin(dist) * math.cos(brng)) lon2 = lon1 + math.atan2(math.sin(brng) *", "return True def area(poly): \"\"\" calculate the area of polygon", "To valid whether linestrings from geojson are intersected with each", "circle poly = [] for step in range(0, steps): brng", "start point index[n_dest] = start n_dest += 1 else: #", "coords_array: if _point_in_polygon(point, coords): return True return False def number2radius(number):", "start point index[n_dest] = start n_dest += 1 # transfer", "= 0 sig_end[0] = count - 1 n_stack = 1", "destination and base point return destination point object \"\"\" dist", "d12 = (x12 * x12) + (y12 * y12) i", "is not empty while n_stack > 0: # ... pop", "+ math.cos(lat1) * math.sin(dist) * math.cos(brng)) lon2 = lon1 +", "get the centroid of the rectangle Keyword arguments: rectangle --", "- xmin ywidth = ymax - ymin return {'type': 'Point',", "and u_a <= 1 and 0 <= u_b and u_b", "points[i][0] p2_x = points[j][1] p2_y = points[j][0] poly_area += p1_x", "0 <= u_a and u_a <= 1 and 0 <=", "False def number2radius(number): \"\"\" convert degree into radius Keyword arguments:", "number2radius(point['coordinates'][0]) lat1 = number2radius(point['coordinates'][1]) lat2 = math.asin(math.sin(lat1) * math.cos(dist) +", "break if _point_in_bbox(point, _bbox_around_polycoords(coord)): inside_box = True if not inside_box:", "c) * 1000 def geometry_within_radius(geometry, center, radius): \"\"\" To valid", "<= 1 and 0 <= u_b and u_b <= 1:", "it's enough to check the exterior ring of the Polygon", "- math.sin(rad_center[0]) * math.sin(lat)) poly.append([number2degree(lng), number2degree(lat)]) return {\"type\": \"Polygon\", \"coordinates\":", "center = [center_point['coordinates'][1], center_point['coordinates'][0]] dist = (radius_in_meters / 1000) /", "p1_y = points[i][0] p2_x = points[j][1] p2_y = points[j][0] poly_area", "avg lat to reduce lng d12 = (x12 * x12)", "? #... 
no, so transfer current start point index[n_dest] =", "d23; F = (math.pi / 180.0) * 0.5 index =", "{\"type\": \"Polygon\", \"coordinates\": [poly]} def rectangle_centroid(rectangle): \"\"\" get the centroid", "_point_in_polygon(point, coords) def point_in_multipolygon(point, multipoly): \"\"\" valid whether the point", "the algorithm to judge whether the point is located in", "!= (vert[j][0] > y)) and (x < (vert[j][1] - vert[i][1])", "d13, x23, y23, d23; F = (math.pi / 180.0) *", "start n_dest += 1 else: # ... yes, so push", "= [] for i in range(0, n_dest): r.append(source_coord[index[i]]) return map(lambda", "# if len(intersects) == 0: # intersects = False return", "True return inside_poly def point_in_polygon(point, poly): \"\"\" valid whether the", "ymin + ywidth / 2]} def point_distance(point1, point2): \"\"\" calculate", "the point is located in polygon reference: https://www.ecse.rpi.edu/~wrf/Research/Short_Notes/pnpoly.html#Explanation \"\"\" vert", "this depth kept kink depth is the height of the", "point is inside the bounding box \"\"\" return not(point['coordinates'][1] <", "point2['coordinates'][0] lat2 = point2['coordinates'][1] deg_lat = number2radius(lat2 - lat1) deg_lon", "_pnpoly(x, y, coords): \"\"\" the algorithm to judge whether the", "point and a distance Keyword arguments: pt -- polygon geojson", "on centerPoint and radius Keyword arguments: point1 -- point one", "radius) return true else false \"\"\" if geometry['type'] == 'Point':", "geometry['coordinates'] for lines in coordinates: for line in lines: line[0],", "metres, kinks above this depth kept kink depth is the", "steps=15): \"\"\" get a circle shape polygon based on centerPoint", "convert wgs84 to gcj referencing by https://github.com/wandergis/coordTransform_py \"\"\" # TODO:", "coordinates: for line in lines: line[0], line[1] = wgs84togcj02(line[0], line[1])", "-- second line geojson object if(line1 intersects with other) return", "\"\"\" source[] array of geojson points kink in metres, kinks", "object radius -- radius if(geometry inside radius) return true else", "if math.fabs(x13) > 180.0: x13 = 360.0 - math.fabs(x13) x13", "b1_x = line2['coordinates'][j][1] b1_y = line2['coordinates'][j][0] b2_x = line2['coordinates'][j +", "Kilometer between destination and base point return destination point object", "- b1_y) * (a2_x - a1_x) - (b2_x - b1_x)", "= geometry['coordinates'][0] if geometry['type'] == 'Polygon' else geometry['coordinates'] for coordinate", "p2_x j = i poly_area /= 2 return poly_area def", "intersects.append({'type': 'Point', 'coordinates': [ a1_x + u_a * (a2_x -", "n_dest += 1 # transfer last point index[n_dest] = count", "box \"\"\" x_all = [] y_all = [] for first", "kinks above this depth kept kink depth is the height", "multipoly) return true else false \"\"\" steps = steps if", "meters to radiant rad_center = [number2radius(center[0]), number2radius(center[1])] # 15 sided", "else false \"\"\" coords_array = [multipoly['coordinates']] if multipoly[ 'type'] ==", "based on centerPoint and radius Keyword arguments: point1 -- point", "angular distance in radians brng = number2radius(brng) lon1 = number2radius(point['coordinates'][0])", "Keyword arguments: line1 -- first line geojson object line2 --", "start & end points x12 = source[end][\"lng\"] - source[start][\"lng\"] y12", "y13 * x12) * (x13 * y12 - y13 *", "math.sin(dist) * math.cos(brng)) lon2 = lon1 + math.atan2(math.sin(brng) * math.sin(dist)", "0 sig_end[0] = count - 1 n_stack = 1 #", "source[start][\"lat\"] if math.fabs(x12) > 180.0: 
x12 = 360.0 - math.fabs(x12)", "wgs2gcj(geometry): \"\"\" convert wgs84 to gcj referencing by https://github.com/wandergis/coordTransform_py \"\"\"", "1 n_dest += 1 # make return array r =", "for further processing n_stack += 1 sig_start[n_stack - 1] =", "with each other. reference: http://www.kevlindev.com/gui/math/intersection/Intersection.js Keyword arguments: line1 -- first", "\"\"\" source_coord = map(lambda o: {\"lng\": o.coordinates[0], \"lat\": o.coordinates[1]}, source)", "degrees band_sqr *= band_sqr n_dest = 0 sig_start[0] = 0", "https://www.ecse.rpi.edu/~wrf/Research/Short_Notes/pnpoly.html#Explanation \"\"\" vert = [[0, 0]] for coord in coords:", "= start n_dest += 1 # transfer last point index[n_dest]", "degree return radius \"\"\" return number * math.pi / 180", "# ... yes, so push two sub-sections on stack for", "j = len(points) - 1 count = len(points) for i", "= sig sig_end[n_stack - 1] = end n_stack += 1", "- b1_x) * (a1_y - b1_y) - \\ (b2_y -", "ring of the Polygon coordinates = geometry['coordinates'][0] if geometry['type'] ==", "source_coord # one or two points # more complex case.", "True if not inside_box: return False inside_poly = False for", "{\"type\": \"Point\",\"coordinates\": [o.lng, o.lat]}, r) def wgs2gcj(geometry): \"\"\" convert wgs84", "* y23) if d13 >= (d12 + d23): dev_sqr =", "= (b2_x - b1_x) * (a1_y - b1_y) - \\", "* x12) / d12 # solve triangle if dev_sqr >", "def wgs2gcj(geometry): \"\"\" convert wgs84 to gcj referencing by https://github.com/wandergis/coordTransform_py", "false \"\"\" if geometry['type'] == 'Point': return point_distance(geometry, center) <=", "convert gcj to bd referencing by https://github.com/wandergis/coordTransform_py \"\"\" # TODO:", "whether the point is located in a mulitpolygon (donut polygon", "(y12 * y12) i = start + 1 sig =", "sub-sections on stack for further processing n_stack += 1 sig_start[n_stack", "shape polygon based on centerPoint and radius Keyword arguments: point1", "/ 6371 # convert meters to radiant rad_center = [number2radius(center[0]),", "six_area = area(poly) * 6 return {'type': 'Point', 'coordinates': [y_total", "segments \"\"\" source_coord = map(lambda o: {\"lng\": o.coordinates[0], \"lat\": o.coordinates[1]},", "180.0: x12 = 360.0 - math.fabs(x12) x12 *= math.cos(F *", "0 y_total = 0 # TODO: polygon holes at coordinates[1]", "\"\"\" steps = steps if steps > 15 else 15", "return [min(x_all), min(y_all), max(x_all), max(y_all)] def _point_in_bbox(point, bounds): \"\"\" valid", "math.cos(dist) - math.sin(lat1) * math.sin(lat2)) lon2 = (lon2 + 3", "* 180 / math.pi def draw_circle(radius_in_meters, center_point, steps=15): \"\"\" get", "math.sin(rad_center[0]) * math.sin(lat)) poly.append([number2degree(lng), number2degree(lat)]) return {\"type\": \"Polygon\", \"coordinates\": [poly]}", "1 if (end - start) > 1: #any intermediate points", "radians brng = number2radius(brng) lon1 = number2radius(point['coordinates'][0]) lat1 = number2radius(point['coordinates'][1])", "lat2 = point2['coordinates'][1] deg_lat = number2radius(lat2 - lat1) deg_lon =", "Polygon coordinates = geometry['coordinates'][0] if geometry['type'] == 'Polygon' else geometry['coordinates']", "x12) + (y12 * y12) i = start + 1", "so push two sub-sections on stack for further processing n_stack", "(vert[j][1] - vert[i][1]) * (y - vert[i][0]) / (vert[j][0] -", "= len(vert) - 1 while i < len(vert): if ((vert[i][0]", "initialize stack band_sqr = kink * 360.0 / (2.0 *", "and b-c are two consecutive line segments \"\"\" source_coord =", "-- point 
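# Editor's usage sketch (hypothetical coordinates): a point inside the unit
# square tests True, a point outside tests False.
def _demo_point_in_polygon():
    square = {'type': 'Polygon',
              'coordinates': [[[0, 0], [0, 1], [1, 1], [1, 0], [0, 0]]]}
    assert point_in_polygon({'type': 'Point', 'coordinates': [0.5, 0.5]}, square)
    assert not point_in_polygon({'type': 'Point', 'coordinates': [2, 2]}, square)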
def number2radius(number):
    """
    convert degrees into radians

    Keyword arguments:
    number -- degrees

    return radians
    """
    return number * math.pi / 180


def number2degree(number):
    """
    convert radians into degrees

    Keyword arguments:
    number -- radians

    return degrees
    """
    return number * 180 / math.pi


def draw_circle(radius_in_meters, center_point, steps=15):
    """
    get a circle-shaped polygon based on a center point and a radius

    Keyword arguments:
    radius_in_meters -- circle radius in meters
    center_point -- point geojson object
    steps -- number of polygon vertices (minimum 15)

    return polygon geojson object approximating the circle
    """
    steps = steps if steps > 15 else 15
    center = [center_point['coordinates'][1], center_point['coordinates'][0]]
    dist = (radius_in_meters / 1000) / 6371  # convert meters to radians
    rad_center = [number2radius(center[0]), number2radius(center[1])]
    poly = []
    for step in range(0, steps):
        brng = 2 * math.pi * step / steps
        lat = math.asin(math.sin(rad_center[0]) * math.cos(dist) +
                        math.cos(rad_center[0]) * math.sin(dist) * math.cos(brng))
        lng = rad_center[1] + \
            math.atan2(math.sin(brng) * math.sin(dist) * math.cos(rad_center[0]),
                       math.cos(dist) - math.sin(rad_center[0]) * math.sin(lat))
        poly.append([number2degree(lng), number2degree(lat)])
    return {"type": "Polygon", "coordinates": [poly]}


def rectangle_centroid(rectangle):
    """
    get the centroid of a rectangle

    Keyword arguments:
    rectangle -- polygon geojson object

    return centroid point geojson object
    """
    bbox = rectangle['coordinates'][0]
    xmin = bbox[0][0]
    ymin = bbox[0][1]
    xmax = bbox[2][0]
    ymax = bbox[2][1]
    xwidth = xmax - xmin
    ywidth = ymax - ymin
    return {'type': 'Point', 'coordinates': [xmin + xwidth / 2, ymin + ywidth / 2]}
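# Editor's usage sketch (hypothetical center point): approximate a 1 km
# circle around lon/lat (116.4, 39.9) with 18 vertices.
def _demo_draw_circle():
    center = {'type': 'Point', 'coordinates': [116.4, 39.9]}
    circle = draw_circle(1000, center, steps=18)
    assert circle['type'] == 'Polygon'
    assert len(circle['coordinates'][0]) == 18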
def point_distance(point1, point2):
    """
    calculate the haversine distance between two points on the sphere,
    as on Google Maps
    reference: http://www.movable-type.co.uk/scripts/latlong.html

    Keyword arguments:
    point1 -- point one geojson object
    point2 -- point two geojson object

    return distance in meters
    """
    lon1 = point1['coordinates'][0]
    lat1 = point1['coordinates'][1]
    lon2 = point2['coordinates'][0]
    lat2 = point2['coordinates'][1]
    deg_lat = number2radius(lat2 - lat1)
    deg_lon = number2radius(lon2 - lon1)
    a = math.pow(math.sin(deg_lat / 2), 2) + math.cos(number2radius(lat1)) * \
        math.cos(number2radius(lat2)) * math.pow(math.sin(deg_lon / 2), 2)
    c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
    return (6371 * c) * 1000


def geometry_within_radius(geometry, center, radius):
    """
    check whether a point, linestring or polygon lies entirely within
    a radius around a center

    Keyword arguments:
    geometry -- point/linestring/polygon geojson object
    center -- point geojson object
    radius -- radius in meters

    if geometry is inside the radius, return True, else False
    """
    if geometry['type'] == 'Point':
        return point_distance(geometry, center) <= radius
    elif geometry['type'] == 'LineString' or geometry['type'] == 'Polygon':
        point = {}
        # it's enough to check the exterior ring of the Polygon
        coordinates = geometry['coordinates'][0] if geometry['type'] == 'Polygon' \
            else geometry['coordinates']
        for coordinate in coordinates:
            point['coordinates'] = coordinate
            if point_distance(point, center) > radius:
                return False
    return True
"def draw_circle(radius_in_meters, center_point, steps=15): \"\"\" get a circle shape polygon", "simple cases count = len(source_coord) if count < 3: return", "return source_coord # one or two points # more complex", "like google map reference http://www.movable-type.co.uk/scripts/latlong.html Keyword arguments: point1 -- point", "+ 1 sig = start max_dev_sqr = -1.0 while i", "/ u_b if 0 <= u_a and u_a <= 1", "number2degree(lat)]) return {\"type\": \"Polygon\", \"coordinates\": [poly]} def rectangle_centroid(rectangle): \"\"\" get", "\\ math.cos(number2radius(lat2)) * math.pow(math.sin(deg_lon / 2), 2) c = 2", "= False return intersects def _bbox_around_polycoords(coords): \"\"\" bounding box \"\"\"", "= math.asin(math.sin(rad_center[0]) * math.cos(dist) + math.cos(rad_center[0]) * math.sin(dist) * math.cos(brng))", "math.pi) - math.pi # normalise to -180 degree +180 degree", "+ 1][0] ua_t = (b2_x - b1_x) * (a1_y -", "return intersects def _bbox_around_polycoords(coords): \"\"\" bounding box \"\"\" x_all =", "/ 1000) / 6371 # convert meters to radiant rad_center", "return {'type': 'Point', 'coordinates': [xmin + xwidth / 2, ymin", "= line2['coordinates'][j][0] b2_x = line2['coordinates'][j + 1][1] b2_y = line2['coordinates'][j", "for node in coord: vert.append(node) vert.append(coord[0]) vert.append([0, 0]) inside =", "= {} # it's enough to check the exterior ring", "section sig_end = [] # check for simple cases count", "p1_y = points[i][0] p2_x = points[j][1] p2_y = points[j][0] f_total", "brng = number2radius(brng) lon1 = number2radius(point['coordinates'][0]) lat1 = number2radius(point['coordinates'][1]) lat2", "polygon Keyword arguments: point -- point geojson object poly --", "= 0 y_total = 0 # TODO: polygon holes at", "'Polygon' else poly['coordinates'] return _point_in_polygon(point, coords) def point_in_multipolygon(point, multipoly): \"\"\"", "j = i poly_area /= 2 return poly_area def centroid(poly):", "- b1_x) ub_t = (a2_x - a1_x) * (a1_y -", "if multipoly[ 'type'] == \"MultiPolygon\" else multipoly['coordinates'] for coords in", "* (a1_x - b1_x) u_b = (b2_y - b1_y) *", "\"\"\" vert = [[0, 0]] for coord in coords: for", "point_in_multipolygon(point, multipoly): \"\"\" valid whether the point is located in", "math.pi def draw_circle(radius_in_meters, center_point, steps=15): \"\"\" get a circle shape", "start sig_end[n_stack - 1] = sig else: # ... 
no", "= d23 elif d23 >= (d12 + d13): dev_sqr =", "point_distance(point1, point2): \"\"\" calculate the distance between two point on", "= area(poly) * 6 return {'type': 'Point', 'coordinates': [y_total /", "is located in polygon reference: https://www.ecse.rpi.edu/~wrf/Research/Short_Notes/pnpoly.html#Explanation \"\"\" vert = [[0,", "source) # count, n_stack, n_dest, start, end, i, sig; #", "False inside_poly = False for coord in coords: if inside_poly:", "arguments: line1 -- first line geojson object line2 -- second", "* (a1_y - b1_y) - \\ (b2_y - b1_y) *", "u_b and u_b <= 1: intersects.append({'type': 'Point', 'coordinates': [ a1_x", "lon1 = number2radius(point['coordinates'][0]) lat1 = number2radius(point['coordinates'][1]) lat2 = math.asin(math.sin(lat1) *", "lat1 = number2radius(point['coordinates'][1]) lat2 = math.asin(math.sin(lat1) * math.cos(dist) + math.cos(lat1)", "'coordinates': [number2degree(lon2), number2degree(lat2)]} def simplify(source, kink=20): \"\"\" source[] array of", "rad_center = [number2radius(center[0]), number2radius(center[1])] # 15 sided circle poly =", "x23 = 360.0 - math.fabs(x23) x23 *= math.cos(F * (source[i][\"lat\"]", "\"\"\" calculate the distance between two point on the sphere", "geojson object poly -- polygon geojson object if(point inside poly)", "\"\"\" return number * math.pi / 180 def number2degree(number): \"\"\"", "n_dest = 0 sig_start[0] = 0 sig_end[0] = count -", "if steps > 15 else 15 center = [center_point['coordinates'][1], center_point['coordinates'][0]]", "math.fabs(x12) x12 *= math.cos(F * (source[end][\"lat\"] + source[start][\"lat\"])) # use", "if inside_poly: break if _pnpoly(point['coordinates'][1], point['coordinates'][0], coord): inside_poly = True", "0: # ... pop the top-most entries off the stacks", "for i in range(0, len(line1['coordinates']) - 1): for j in", "number2radius(center[1])] # 15 sided circle poly = [] for step", "radius if(geometry inside radius) return true else false \"\"\" if", "judge whether the point is located in polygon reference: https://www.ecse.rpi.edu/~wrf/Research/Short_Notes/pnpoly.html#Explanation", "/= 2 return poly_area def centroid(poly): \"\"\" get the centroid", "360.0 - math.fabs(x13) x13 *= math.cos(F * (source[i][\"lat\"] + source[start][\"lat\"]))", "around a center Keyword arguments: geometry -- point/linstring/polygon geojson object", "ymin return {'type': 'Point', 'coordinates': [xmin + xwidth / 2,", "[poly]} def rectangle_centroid(rectangle): \"\"\" get the centroid of the rectangle", "= 'github: https://github.com/brandonxiang/geojson-python-utils' import math from coordTransform_utils import wgs84togcj02 from", "math.asin(math.sin(rad_center[0]) * math.cos(dist) + math.cos(rad_center[0]) * math.sin(dist) * math.cos(brng)) lng", "rectangle -- polygon geojson object return centroid \"\"\" bbox =", "> 15 else 15 center = [center_point['coordinates'][1], center_point['coordinates'][0]] dist =", "= 0 # TODO: polygon holes at coordinates[1] points =", "array else empty array \"\"\" intersects = [] for i", "radius -- radius if(geometry inside radius) return true else false", "(source[end][\"lat\"] + source[start][\"lat\"])) # use avg lat to reduce lng", "0]) inside = False i = 0 j = len(vert)", "def linestrings_intersect(line1, line2): \"\"\" To valid whether linestrings from geojson", "ywidth / 2]} def point_distance(point1, point2): \"\"\" calculate the distance", "coordinates: for line in lines: line[0], line[1] = gcj02tobd09(line[0], line[1])", "true else false \"\"\" coords = 
[poly['coordinates']] if poly[ 'type']", "end of working section sig_end = [] # check for", "n_stack += 1 sig_start[n_stack - 1] = sig sig_end[n_stack -", "o.lat]}, r) def wgs2gcj(geometry): \"\"\" convert wgs84 to gcj referencing", "line[1]) return geometry def gcj2bd(geometry): \"\"\" convert gcj to bd", "in coords: if inside_box: break if _point_in_bbox(point, _bbox_around_polycoords(coord)): inside_box =", "transfer last point index[n_dest] = count - 1 n_dest +=", "sig_end = [] # check for simple cases count =", "math.pi * step / steps lat = math.asin(math.sin(rad_center[0]) * math.cos(dist)", "a-b and b-c are two consecutive line segments \"\"\" source_coord", "in coords_array: if _point_in_polygon(point, coords): return True return False def", "return number * 180 / math.pi def draw_circle(radius_in_meters, center_point, steps=15):", "x12) / d12 # solve triangle if dev_sqr > max_dev_sqr:", "math.cos(brng)) lon2 = lon1 + math.atan2(math.sin(brng) * math.sin(dist) * math.cos(lat1),", "\"\"\" coords_array = [multipoly['coordinates']] if multipoly[ 'type'] == \"MultiPolygon\" else", "https://github.com/brandonxiang/geojson-python-utils' import math from coordTransform_utils import wgs84togcj02 from coordTransform_utils import", "return {\"type\": \"Polygon\", \"coordinates\": [poly]} def rectangle_centroid(rectangle): \"\"\" get the", "first in coords[0]: x_all.append(first[1]) y_all.append(first[0]) return [min(x_all), min(y_all), max(x_all), max(y_all)]", "each other. reference: http://www.kevlindev.com/gui/math/intersection/Intersection.js Keyword arguments: line1 -- first line", "elif d23 >= (d12 + d13): dev_sqr = d13 else:", "x_all.append(first[1]) y_all.append(first[0]) return [min(x_all), min(y_all), max(x_all), max(y_all)] def _point_in_bbox(point, bounds):", "point if geometry['type'] == 'MultiLineString': coordinates = geometry['coordinates'] for lines", "'coordinates': [y_total / six_area, x_total / six_area]} def destination_point(point, brng,", "_bbox_around_polycoords(coords): \"\"\" bounding box \"\"\" x_all = [] y_all =", "= (math.pi / 180.0) * 0.5 index = [] #", "= True if not inside_box: return False inside_poly = False", "geojson object radius -- radius if(geometry inside radius) return true", "into radius Keyword arguments: number -- degree return radius \"\"\"", "else: # ... yes, so push two sub-sections on stack", "index = [] # aray of indexes of source points", "Keyword arguments: point -- point geojson object multipoly -- multipolygon", "if geometry['type'] == 'Point': return point_distance(geometry, center) <= radius elif", "> y) != (vert[j][0] > y)) and (x < (vert[j][1]", "ymin = bbox[0][1] xmax = bbox[2][0] ymax = bbox[2][1] xwidth", "= point2['coordinates'][1] deg_lat = number2radius(lat2 - lat1) deg_lon = number2radius(lon2", "poly -- polygon geojson object return polygon area \"\"\" poly_area", "math.fabs(x13) x13 *= math.cos(F * (source[i][\"lat\"] + source[start][\"lat\"])) d13 =", "if not u_b == 0: u_a = ua_t / u_b", "brng, dist): \"\"\" Calculate a destination Point base on a", "(a2_y - a1_y) if not u_b == 0: u_a =", "complex case. 
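# Editor's usage sketch: area() is a planar shoelace formula on raw
# coordinates, so the unit square yields 1.0 in squared degrees (the sign
# depends on ring orientation) and its centroid is (0.5, 0.5).
def _demo_area_centroid():
    square = {'type': 'Polygon',
              'coordinates': [[[0, 0], [0, 1], [1, 1], [1, 0], [0, 0]]]}
    assert abs(area(square)) == 1.0
    assert centroid(square)['coordinates'] == [0.5, 0.5]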
def simplify(source, kink=20):
    """
    simplify a line of geojson points with the Douglas-Peucker algorithm

    Keyword arguments:
    source -- array of geojson points
    kink -- kink depth in meters; kinks above this depth are kept
            (kink depth is the height of the triangle abc where a-b and b-c
            are two consecutive line segments)
    """
    # work on plain lng/lat pairs (the original used map() plus attribute
    # access, which breaks on Python 3 and on geojson dicts)
    source_coord = [{"lng": pt["coordinates"][0], "lat": pt["coordinates"][1]}
                    for pt in source]
    F = (math.pi / 180.0) * 0.5
    # check for simple cases
    count = len(source_coord)
    if count < 3:
        return source  # one or two points
    # more complex case: initialize the stacks
    index = [0] * count      # indexes of source points to keep in the reduced line
    sig_start = [0] * count  # indices of start & end of the working section
    sig_end = [0] * count
    band_sqr = kink * 360.0 / (2.0 * math.pi * 6378137.0)  # now in degrees
    band_sqr *= band_sqr
    n_dest = 0
    sig_start[0] = 0
    sig_end[0] = count - 1
    n_stack = 1
    # while the stack is not empty ...
    while n_stack > 0:
        # ... pop the top-most entries off the stacks
        start = sig_start[n_stack - 1]
        end = sig_end[n_stack - 1]
        n_stack -= 1
        if (end - start) > 1:  # any intermediate points?
            # ... yes, so find the most deviant intermediate point to
            # either side of the line joining the start & end points
            x12 = source_coord[end]["lng"] - source_coord[start]["lng"]
            y12 = source_coord[end]["lat"] - source_coord[start]["lat"]
            if math.fabs(x12) > 180.0:
                x12 = 360.0 - math.fabs(x12)
            # use the average latitude to reduce the longitude
            x12 *= math.cos(F * (source_coord[end]["lat"] + source_coord[start]["lat"]))
            d12 = (x12 * x12) + (y12 * y12)
            i = start + 1
            sig = start
            max_dev_sqr = -1.0
            while i < end:
                x13 = source_coord[i]["lng"] - source_coord[start]["lng"]
                y13 = source_coord[i]["lat"] - source_coord[start]["lat"]
                if math.fabs(x13) > 180.0:
                    x13 = 360.0 - math.fabs(x13)
                x13 *= math.cos(F * (source_coord[i]["lat"] + source_coord[start]["lat"]))
                d13 = (x13 * x13) + (y13 * y13)
                x23 = source_coord[i]["lng"] - source_coord[end]["lng"]
                y23 = source_coord[i]["lat"] - source_coord[end]["lat"]
                if math.fabs(x23) > 180.0:
                    x23 = 360.0 - math.fabs(x23)
                x23 *= math.cos(F * (source_coord[i]["lat"] + source_coord[end]["lat"]))
                d23 = (x23 * x23) + (y23 * y23)
                if d13 >= (d12 + d23):
                    dev_sqr = d23
                elif d23 >= (d12 + d13):
                    dev_sqr = d13
                else:
                    # solve the triangle
                    dev_sqr = (x13 * y12 - y13 * x12) * (x13 * y12 - y13 * x12) / d12
                if dev_sqr > max_dev_sqr:
                    sig = i
                    max_dev_sqr = dev_sqr
                i += 1
            if max_dev_sqr < band_sqr:  # is there a significant intermediate point?
                # ... no, so transfer the current start point
                index[n_dest] = start
                n_dest += 1
            else:
                # ... yes, so push two sub-sections on the stack for further processing
                n_stack += 1
                sig_start[n_stack - 1] = sig
                sig_end[n_stack - 1] = end
                n_stack += 1
                sig_start[n_stack - 1] = start
                sig_end[n_stack - 1] = sig
        else:
            # ... no intermediate points, so transfer the current start point
            index[n_dest] = start
            n_dest += 1
    # transfer the last point
    index[n_dest] = count - 1
    n_dest += 1
    # make the return array
    return [{"type": "Point",
             "coordinates": [source_coord[index[i]]["lng"], source_coord[index[i]]["lat"]]}
            for i in range(0, n_dest)]
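# Editor's usage sketch: three nearly collinear points whose middle point
# deviates far less than the default 20 m kink collapse to the two endpoints.
def _demo_simplify():
    pts = [{'type': 'Point', 'coordinates': [0.0, 0.0]},
           {'type': 'Point', 'coordinates': [0.5, 0.00001]},
           {'type': 'Point', 'coordinates': [1.0, 0.0]}]
    assert len(simplify(pts)) == 2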
destination_point(point, brng, dist):", "f_total = p1_x * p2_y - p2_x * p1_y x_total", "def _point_in_bbox(point, bounds): \"\"\" valid whether the point is inside", "-= 1 if (end - start) > 1: #any intermediate", "radius into degree Keyword arguments: number -- radius return degree", "math.pi * 6378137.0) # Now in degrees band_sqr *= band_sqr", "1 else: # ... yes, so push two sub-sections on", "(a2_y - a1_y)]}) # if len(intersects) == 0: # intersects", "len(vert): if ((vert[i][0] > y) != (vert[j][0] > y)) and", "= count - 1 n_stack = 1 # while the", "+ (y12 * y12) i = start + 1 sig", "d23 >= (d12 + d13): dev_sqr = d13 else: dev_sqr", "poly = [] for step in range(0, steps): brng =", "object line2 -- second line geojson object if(line1 intersects with", "the point is located in a mulitpolygon (donut polygon is", "180.0: x23 = 360.0 - math.fabs(x23) x23 *= math.cos(F *", "d23 = (x23 * x23) + (y23 * y23) if", "= False for coord in coords: if inside_box: break if", "d13 = (x13 * x13) + (y13 * y13) x23", "polygon based on centerPoint and radius Keyword arguments: point1 --", "ymax = bbox[2][1] xwidth = xmax - xmin ywidth =", "in coordinates: point['coordinates'] = coordinate if point_distance(point, center) > radius:", "= sig_end[n_stack - 1] n_stack -= 1 if (end -", "by https://github.com/wandergis/coordTransform_py \"\"\" # TODO: point linestring point if geometry['type']", "\"\"\" poly_area = 0 # TODO: polygon holes at coordinates[1]", "\"\"\" bbox = rectangle['coordinates'][0] xmin = bbox[0][0] ymin = bbox[0][1]", "point geojson object poly -- polygon geojson object if(point inside", "= False i = 0 j = len(vert) - 1", "index[n_dest] = start n_dest += 1 else: # ... yes,", "coords): \"\"\" the algorithm to judge whether the point is", "dev_sqr, max_dev_sqr, band_sqr; # x12, y12, d12, x13, y13, d13,", "multipoly) return true else false \"\"\" coords_array = [multipoly['coordinates']] if", "so find most deviant intermediate point to either side of", "inside_poly: break if _pnpoly(point['coordinates'][1], point['coordinates'][0], coord): inside_poly = True return", "-- an angle in degrees dist -- distance in Kilometer", "include in the reduced line sig_start = [] # indices", "geojson object brng -- an angle in degrees dist --", "object return centroid \"\"\" bbox = rectangle['coordinates'][0] xmin = bbox[0][0]", "len(points) for i in range(0, count): p1_x = points[i][1] p1_y", "degrees dist -- distance in Kilometer between destination and base", "math.cos(dist) + math.cos(lat1) * math.sin(dist) * math.cos(brng)) lon2 = lon1", "= steps if steps > 15 else 15 center =", "= number2radius(lon2 - lon1) a = math.pow(math.sin(deg_lat / 2), 2)", "source[start][\"lat\"])) # use avg lat to reduce lng d12 =", "to radiant rad_center = [number2radius(center[0]), number2radius(center[1])] # 15 sided circle", "< band_sqr: # is there a sig. intermediate point ?", "(source[i][\"lat\"] + source[start][\"lat\"])) d13 = (x13 * x13) + (y13", "deviant intermediate point to either side of line joining start", "in range(0, steps): brng = 2 * math.pi * step", "whether the point is inside the bounding box \"\"\" return", "= map(lambda o: {\"lng\": o.coordinates[0], \"lat\": o.coordinates[1]}, source) # count,", "source[end][\"lng\"] y23 = source[i][\"lat\"] - source[end][\"lat\"] if math.fabs(x23) > 180.0:" ]
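A quick usage sketch of the helpers above; the sample polygon and the expected outputs are illustrative assumptions, not taken from the original module:

# Illustrative only: a 10x10-degree polygon and two test points.
square = {'type': 'Polygon',
          'coordinates': [[[0, 0], [0, 10], [10, 10], [10, 0], [0, 0]]]}
inner = {'type': 'Point', 'coordinates': [5, 5]}
outer = {'type': 'Point', 'coordinates': [15, 5]}

print(point_in_polygon(inner, square))  # expected: True
print(point_in_polygon(outer, square))  # expected: False
print(point_distance(inner, outer))     # great-circle distance in meters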
[ "os.environ['POSTGRES_USER'] password = os.environ['<PASSWORD>'] host = os.environ['POSTGRES_HOST'] database = os.environ['POSTGRES_DB']", "user = 'test' password = 'password' host = 'localhost' database", "'password' host = 'localhost' database = 'example' port = '5432'", "= os.environ['<PASSWORD>'] host = os.environ['POSTGRES_HOST'] database = os.environ['POSTGRES_DB'] port =", "user = os.environ['POSTGRES_USER'] password = os.environ['<PASSWORD>'] host = os.environ['POSTGRES_HOST'] database", "'test' password = 'password' host = 'localhost' database = 'example'", "''' user = os.environ['POSTGRES_USER'] password = os.environ['<PASSWORD>'] host = os.environ['POSTGRES_HOST']", "= 'password' host = 'localhost' database = 'example' port =", "''' user = 'test' password = 'password' host = 'localhost'", "port = os.environ['POSTGRES_PORT'] ''' user = 'test' password = 'password'", "= os.environ['POSTGRES_HOST'] database = os.environ['POSTGRES_DB'] port = os.environ['POSTGRES_PORT'] ''' user", "= os.environ['POSTGRES_DB'] port = os.environ['POSTGRES_PORT'] ''' user = 'test' password", "os.environ['POSTGRES_HOST'] database = os.environ['POSTGRES_DB'] port = os.environ['POSTGRES_PORT'] ''' user =", "= os.environ['POSTGRES_USER'] password = os.environ['<PASSWORD>'] host = os.environ['POSTGRES_HOST'] database =", "host = 'localhost' database = 'example' port = '5432' DATABASE_CONNECTION_URI", "os ''' user = os.environ['POSTGRES_USER'] password = os.environ['<PASSWORD>'] host =", "database = os.environ['POSTGRES_DB'] port = os.environ['POSTGRES_PORT'] ''' user = 'test'", "os.environ['<PASSWORD>'] host = os.environ['POSTGRES_HOST'] database = os.environ['POSTGRES_DB'] port = os.environ['POSTGRES_PORT']", "host = os.environ['POSTGRES_HOST'] database = os.environ['POSTGRES_DB'] port = os.environ['POSTGRES_PORT'] '''", "import os ''' user = os.environ['POSTGRES_USER'] password = os.environ['<PASSWORD>'] host", "os.environ['POSTGRES_PORT'] ''' user = 'test' password = 'password' host =", "password = 'password' host = 'localhost' database = 'example' port", "= 'localhost' database = 'example' port = '5432' DATABASE_CONNECTION_URI =", "= 'test' password = 'password' host = 'localhost' database =", "= os.environ['POSTGRES_PORT'] ''' user = 'test' password = 'password' host", "'localhost' database = 'example' port = '5432' DATABASE_CONNECTION_URI = f'postgresql+psycopg2://{user}:{password}@{host}:{port}/{database}'", "password = os.environ['<PASSWORD>'] host = os.environ['POSTGRES_HOST'] database = os.environ['POSTGRES_DB'] port", "os.environ['POSTGRES_DB'] port = os.environ['POSTGRES_PORT'] ''' user = 'test' password =" ]
[ "b * (sx / n) print('{:.3f}'.format(a + b * 80))", "zip(*[(x, y, x**2, x * y) for x, y in", "= 5 xy = [map(int, input().split()) for _ in range(n)]", "xy = [map(int, input().split()) for _ in range(n)] sx, sy,", "input().split()) for _ in range(n)] sx, sy, sx2, sxy =", "n) - b * (sx / n) print('{:.3f}'.format(a + b", "- sx * sy) / (n * sx2 - sx**2)", "= map(sum, zip(*[(x, y, x**2, x * y) for x,", "Jun 3 19:26:47 2019 @author: sercangul \"\"\" n = 5", "coding: utf-8 -*- \"\"\" Created on Mon Jun 3 19:26:47", "(n * sx2 - sx**2) a = (sy / n)", "sx2 - sx**2) a = (sy / n) - b", "- b * (sx / n) print('{:.3f}'.format(a + b *", "* sxy - sx * sy) / (n * sx2", "# -*- coding: utf-8 -*- \"\"\" Created on Mon Jun", "sercangul \"\"\" n = 5 xy = [map(int, input().split()) for", "sx**2) a = (sy / n) - b * (sx", "xy])) b = (n * sxy - sx * sy)", "b = (n * sxy - sx * sy) /", "- sx**2) a = (sy / n) - b *", "in range(n)] sx, sy, sx2, sxy = map(sum, zip(*[(x, y,", "y) for x, y in xy])) b = (n *", "in xy])) b = (n * sxy - sx *", "* sy) / (n * sx2 - sx**2) a =", "3 19:26:47 2019 @author: sercangul \"\"\" n = 5 xy", "\"\"\" n = 5 xy = [map(int, input().split()) for _", "y, x**2, x * y) for x, y in xy]))", "n = 5 xy = [map(int, input().split()) for _ in", "@author: sercangul \"\"\" n = 5 xy = [map(int, input().split())", "for x, y in xy])) b = (n * sxy", "python3 # -*- coding: utf-8 -*- \"\"\" Created on Mon", "2019 @author: sercangul \"\"\" n = 5 xy = [map(int,", "<reponame>sercangul/HackerRank #!/usr/bin/env python3 # -*- coding: utf-8 -*- \"\"\" Created", "-*- \"\"\" Created on Mon Jun 3 19:26:47 2019 @author:", "= (sy / n) - b * (sx / n)", "Mon Jun 3 19:26:47 2019 @author: sercangul \"\"\" n =", "#!/usr/bin/env python3 # -*- coding: utf-8 -*- \"\"\" Created on", "for _ in range(n)] sx, sy, sx2, sxy = map(sum,", "[map(int, input().split()) for _ in range(n)] sx, sy, sx2, sxy", "\"\"\" Created on Mon Jun 3 19:26:47 2019 @author: sercangul", "on Mon Jun 3 19:26:47 2019 @author: sercangul \"\"\" n", "sx2, sxy = map(sum, zip(*[(x, y, x**2, x * y)", "y in xy])) b = (n * sxy - sx", "sy, sx2, sxy = map(sum, zip(*[(x, y, x**2, x *", "19:26:47 2019 @author: sercangul \"\"\" n = 5 xy =", "x**2, x * y) for x, y in xy])) b", "x, y in xy])) b = (n * sxy -", "sx * sy) / (n * sx2 - sx**2) a", "_ in range(n)] sx, sy, sx2, sxy = map(sum, zip(*[(x,", "a = (sy / n) - b * (sx /", "sy) / (n * sx2 - sx**2) a = (sy", "sx, sy, sx2, sxy = map(sum, zip(*[(x, y, x**2, x", "x * y) for x, y in xy])) b =", "Created on Mon Jun 3 19:26:47 2019 @author: sercangul \"\"\"", "utf-8 -*- \"\"\" Created on Mon Jun 3 19:26:47 2019", "sxy = map(sum, zip(*[(x, y, x**2, x * y) for", "map(sum, zip(*[(x, y, x**2, x * y) for x, y", "(n * sxy - sx * sy) / (n *", "range(n)] sx, sy, sx2, sxy = map(sum, zip(*[(x, y, x**2,", "/ n) - b * (sx / n) print('{:.3f}'.format(a +", "* y) for x, y in xy])) b = (n", "sxy - sx * sy) / (n * sx2 -", "/ (n * sx2 - sx**2) a = (sy /", "* sx2 - sx**2) a = (sy / n) -", "= (n * sxy - sx * sy) / (n", "-*- coding: utf-8 -*- \"\"\" Created on Mon Jun 3", "5 xy = [map(int, input().split()) for _ in range(n)] sx,", "= [map(int, input().split()) for _ in range(n)] sx, sy, sx2,", "(sy / n) - b * (sx / n) print('{:.3f}'.format(a" ]
[ "]) def reset_obs(self, obs): state = np.insert(obs, 0, 0.) qpos", "def reset_obs(self, obs): state = np.insert(obs, 0, 0.) qpos =", "reset_obs(self, obs): state = np.insert(obs, 0, 0.) qpos = state[:self.model.nq]", "obs): state = np.insert(obs, 0, 0.) qpos = state[:self.model.nq] qvel", "import numpy as np class HopperEnv(hopper.HopperEnv): def _get_obs(self): return np.concatenate([", "0, 0.) qpos = state[:self.model.nq] qvel = state[self.model.nq:] self.set_state(qpos, qvel)", "numpy as np class HopperEnv(hopper.HopperEnv): def _get_obs(self): return np.concatenate([ self.sim.data.qpos.flat[1:],", "class HopperEnv(hopper.HopperEnv): def _get_obs(self): return np.concatenate([ self.sim.data.qpos.flat[1:], self.sim.data.qvel.flat, ]) def", "np.insert(obs, 0, 0.) qpos = state[:self.model.nq] qvel = state[self.model.nq:] self.set_state(qpos,", "as hopper import numpy as np class HopperEnv(hopper.HopperEnv): def _get_obs(self):", "np class HopperEnv(hopper.HopperEnv): def _get_obs(self): return np.concatenate([ self.sim.data.qpos.flat[1:], self.sim.data.qvel.flat, ])", "self.sim.data.qvel.flat, ]) def reset_obs(self, obs): state = np.insert(obs, 0, 0.)", "hopper import numpy as np class HopperEnv(hopper.HopperEnv): def _get_obs(self): return", "as np class HopperEnv(hopper.HopperEnv): def _get_obs(self): return np.concatenate([ self.sim.data.qpos.flat[1:], self.sim.data.qvel.flat,", "np.concatenate([ self.sim.data.qpos.flat[1:], self.sim.data.qvel.flat, ]) def reset_obs(self, obs): state = np.insert(obs,", "return np.concatenate([ self.sim.data.qpos.flat[1:], self.sim.data.qvel.flat, ]) def reset_obs(self, obs): state =", "import gym.envs.mujoco.hopper as hopper import numpy as np class HopperEnv(hopper.HopperEnv):", "def _get_obs(self): return np.concatenate([ self.sim.data.qpos.flat[1:], self.sim.data.qvel.flat, ]) def reset_obs(self, obs):", "self.sim.data.qpos.flat[1:], self.sim.data.qvel.flat, ]) def reset_obs(self, obs): state = np.insert(obs, 0,", "_get_obs(self): return np.concatenate([ self.sim.data.qpos.flat[1:], self.sim.data.qvel.flat, ]) def reset_obs(self, obs): state", "HopperEnv(hopper.HopperEnv): def _get_obs(self): return np.concatenate([ self.sim.data.qpos.flat[1:], self.sim.data.qvel.flat, ]) def reset_obs(self,", "state = np.insert(obs, 0, 0.) qpos = state[:self.model.nq] qvel =", "gym.envs.mujoco.hopper as hopper import numpy as np class HopperEnv(hopper.HopperEnv): def", "= np.insert(obs, 0, 0.) qpos = state[:self.model.nq] qvel = state[self.model.nq:]", "0.) qpos = state[:self.model.nq] qvel = state[self.model.nq:] self.set_state(qpos, qvel) return", "qpos = state[:self.model.nq] qvel = state[self.model.nq:] self.set_state(qpos, qvel) return self._get_obs()" ]
[ "An iterator that will yields parsed results, in the format", "self.batch_size: # res = self._convert_data(label_list, features_list) # yield self.gen_feed_dict(res), impression_id_list,", "# impression_id_list.append(impression_id) # cnt += 1 # if cnt ==", "further operation. # Args: # labels (list): a list of", "# range(len(dnn_feat_indices)), # key=lambda k: (dnn_feat_indices[k][0], dnn_feat_indices[k][1]), # ) #", "placeholders for the model. # Args: # hparams (obj): Global", "= np.asarray([[label] for label in labels], dtype=np.float32) # res[\"dnn_feat_indices\"] =", "as np # import tensorflow as tf import abc class", "tf.float32, [None], name=\"fm_feat_values\" # ) # self.fm_feat_shape = tf.placeholder(tf.int64, [None],", "[] # cnt = 0 # if cnt > 0:", "(dict): a dictionary that maps string name to numpy arrays.", "a list of ground-truth labels. # features (list): a 3-dimensional", "MIT License. import numpy as np # import tensorflow as", "one line. # ID_spliter (str): ID splitter in one line.", "input data. # \"\"\" # def __init__(self, hparams, graph, col_spliter=\"", "# labels (list): a list of ground-truth labels. # features", "into feature values. # Args: # line (str): a string", "hyper-parameters. Some key settings such as #_feature and #_field are", "list: Parsed results,including label, features and impression_id # \"\"\" #", "self.graph = graph # with self.graph.as_default(): # self.labels = tf.placeholder(tf.float32,", "tf.int64, [None], name=\"dnn_feat_shape\" # ) # def parser_one_line(self, line): #", "impression_id_list, cnt # def _convert_data(self, labels, features): # \"\"\"Convert data", "# def __init__(self, hparams, graph, col_spliter=\" \", ID_spliter=\"%\"): # \"\"\"Initialize", "dictionary, contains multiple numpy arrays that are convenient for further", "graph. All created placeholder will be added to this graph.", "labels, features): pass @abc.abstractmethod def gen_feed_dict(self, data_dict): pass # class", "# Args: # data_dict (dict): a dictionary that maps string", "are convenient for further operation. # \"\"\" # dim =", "and impression_id # \"\"\" # impression_id = 0 # words", "in range(m): # fm_feat_indices.append([i, features[i][j][1]]) # fm_feat_values.append(features[i][j][2]) # if features[i][j][0]", "[None], name=\"dnn_feat_weights\" # ) # self.dnn_feat_shape = tf.placeholder( # tf.int64,", "words[1].strip() # cols = words[0].strip().split(self.col_spliter) # label = float(cols[0]) #", "parser_one_line(self, line): # \"\"\"Parse one string line into feature values.", "# for j in range(m): # fm_feat_indices.append([i, features[i][j][1]]) # fm_feat_values.append(features[i][j][2])", "graph. # col_spliter (str): column splitter in one line. #", "col_spliter (str): column splitter in one line. # ID_spliter (str):", "def _convert_data(self, labels, features): # \"\"\"Convert data into numpy arrays", "words[0].strip().split(self.col_spliter) # label = float(cols[0]) # features = [] #", "splitter in one line. # ID_spliter (str): ID splitter in", "# ] # res[\"dnn_feat_weights\"] = np.asarray(dnn_feat_weights, dtype=np.float32)[ # sorted_index #", "format of graph feed_dict. 
# \"\"\" # label_list = []", "def __init__(self, hparams, graph, col_spliter=\" \", ID_spliter=\"%\"): # \"\"\"Initialize an", "self.fm_feat_shape = tf.placeholder(tf.int64, [None], name=\"fm_feat_shape\") # self.dnn_feat_indices = tf.placeholder( #", "name=\"dnn_feat_weights\" # ) # self.dnn_feat_shape = tf.placeholder( # tf.int64, [None],", "feature array is a list of [field_idx, feature_idx, feature_value] tuple.", "arrays. # \"\"\" # feed_dict = { # self.labels: data_dict[\"labels\"],", "input file. Each line in this file is an instance.", "list, carrying a list (batch_size) of feature array, # where", "# \"\"\" # label_list = [] # features_list = []", "[instance_cnt * FIELD_COUNT, -1] # for i in range(instance_cnt): #", "# self.labels = tf.placeholder(tf.float32, [None, 1], name=\"label\") # self.fm_feat_indices =", "# \"\"\"Read and parse data from a file. # Args:", "#_feature and #_field are there. # graph (obj): the running", "# obj: An iterator that will yields parsed results, in", "self.dnn_feat_indices = tf.placeholder( # tf.int64, [None, 2], name=\"dnn_feat_indices\" # )", "infile): pass @abc.abstractmethod def _convert_data(self, labels, features): pass @abc.abstractmethod def", "(list): a 3-dimensional list, carrying a list (batch_size) of feature", "# res[\"dnn_feat_values\"] = np.asarray(dnn_feat_values, dtype=np.int64)[ # sorted_index # ] #", "where each feature array is a list of [field_idx, feature_idx,", "Licensed under the MIT License. import numpy as np #", "# dim = self.feature_cnt # FIELD_COUNT = self.field_cnt # instance_cnt", "hparams.FIELD_COUNT # self.col_spliter = col_spliter # self.ID_spliter = ID_spliter #", "1], name=\"label\") # self.fm_feat_indices = tf.placeholder( # tf.int64, [None, 2],", "dnn_feat_indices = [] # dnn_feat_values = [] # dnn_feat_weights =", "- 1, int(tokens[1]) - 1, float(tokens[2])]) # return label, features,", "that will yields parsed results, in the format of graph", "> 0: # res = self._convert_data(label_list, features_list) # yield self.gen_feed_dict(res),", "name=\"label\") # self.fm_feat_indices = tf.placeholder( # tf.int64, [None, 2], name=\"fm_feat_indices\"", "# dnn_feat_dic[features[i][j][0]] = 0 # else: # dnn_feat_dic[features[i][j][0]] += 1", "] # ) # dnn_feat_values.append(features[i][j][1]) # dnn_feat_weights.append(features[i][j][2]) # if dnn_feat_shape[1]", "= float(cols[0]) # features = [] # for word in", "fm_feat_values = [] # fm_feat_shape = [instance_cnt, dim] # dnn_feat_indices", "dnn_feat_dic = {} # for j in range(m): # fm_feat_indices.append([i,", "label_list = [] # features_list = [] # impression_id_list =", "# impression_id = 0 # words = line.strip().split(self.ID_spliter) # if", "# self.feature_cnt = hparams.FEATURE_COUNT # self.field_cnt = hparams.FIELD_COUNT # self.col_spliter", "for j in range(m): # fm_feat_indices.append([i, features[i][j][1]]) # fm_feat_values.append(features[i][j][2]) #", "np.asarray(dnn_feat_indices, dtype=np.int64)[ # sorted_index # ] # res[\"dnn_feat_values\"] = np.asarray(dnn_feat_values,", "will yields parsed results, in the format of graph feed_dict.", "and #_field are there. 
# graph (obj): the running graph.", "_convert_data(self, labels, features): # \"\"\"Convert data into numpy arrays that", "carrying a list (batch_size) of feature array, # where each", "= 0 # words = line.strip().split(self.ID_spliter) # if len(words) ==", "not word.strip(): # continue # tokens = word.split(\":\") # features.append([int(tokens[0])", "# features_list = [] # impression_id_list = [] # cnt", "# ) # self.fm_feat_shape = tf.placeholder(tf.int64, [None], name=\"fm_feat_shape\") # self.dnn_feat_indices", "# with tf.gfile.GFile(infile, \"r\") as rd: # for line in", "# for word in cols[1:]: # if not word.strip(): #", "feature_idx, feature_value] tuple. # Returns: # dict: A dictionary, contains", "self.ID_spliter = ID_spliter # self.batch_size = hparams.batch_size # self.graph =", "array, # where each feature array is a list of", "np.asarray(dnn_feat_shape, dtype=np.int64) # return res # def gen_feed_dict(self, data_dict): #", "column splitter in one line. # ID_spliter (str): ID splitter", "word in cols[1:]: # if not word.strip(): # continue #", "# Returns: # obj: An iterator that will yields parsed", "for further operation. # \"\"\" # dim = self.feature_cnt #", "\"r\") as rd: # for line in rd: # label,", "# Args: # infile (str): text input file. Each line", "for i in range(instance_cnt): # m = len(features[i]) # dnn_feat_dic", "self.dnn_feat_values: data_dict[\"dnn_feat_values\"], # self.dnn_feat_weights: data_dict[\"dnn_feat_weights\"], # self.dnn_feat_shape: data_dict[\"dnn_feat_shape\"], # }", "self.gen_feed_dict(res), impression_id_list, self.batch_size # label_list = [] # features_list =", "# ) # self.fm_feat_values = tf.placeholder( # tf.float32, [None], name=\"fm_feat_values\"", "for the model. # Args: # hparams (obj): Global hyper-parameters.", "res[\"dnn_feat_shape\"] = np.asarray(dnn_feat_shape, dtype=np.int64) # return res # def gen_feed_dict(self,", "data_dict[\"fm_feat_indices\"], # self.fm_feat_values: data_dict[\"fm_feat_values\"], # self.fm_feat_shape: data_dict[\"fm_feat_shape\"], # self.dnn_feat_indices: data_dict[\"dnn_feat_indices\"],", "load_data_from_file(self, infile): # \"\"\"Read and parse data from a file.", "into memory. Instead, it loads data into memory # per", "# impression_id_list = [] # cnt = 0 # with", "Create necessary placeholders for the model. # Args: # hparams", "in cols[1:]: # if not word.strip(): # continue # tokens", "self.dnn_feat_shape = tf.placeholder( # tf.int64, [None], name=\"dnn_feat_shape\" # ) #", ") # self.fm_feat_shape = tf.placeholder(tf.int64, [None], name=\"fm_feat_shape\") # self.dnn_feat_indices =", "FIELD_COUNT = self.field_cnt # instance_cnt = len(labels) # fm_feat_indices =", "fm_feat_indices = [] # fm_feat_values = [] # fm_feat_shape =", "# self.batch_size = hparams.batch_size # self.graph = graph # with", "tf.placeholder( # tf.int64, [None], name=\"dnn_feat_shape\" # ) # def parser_one_line(self,", "features_list = [] # impression_id_list = [] # cnt =", "# Args: # labels (list): a list of ground-truth labels.", "if features[i][j][0] not in dnn_feat_dic: # dnn_feat_dic[features[i][j][0]] = 0 #", "# dict: a dictionary that maps graph elements to numpy", "as xDeepFM. 
# Iterator will not load the whole data", "tf import abc class BaseIterator(object): @abc.abstractmethod def parser_one_line(self, line): pass", "# \"\"\" # self.feature_cnt = hparams.FEATURE_COUNT # self.field_cnt = hparams.FIELD_COUNT", "# features.append([int(tokens[0]) - 1, int(tokens[1]) - 1, float(tokens[2])]) # return", "dnn_feat_dic: # dnn_feat_dic[features[i][j][0]] = 0 # else: # dnn_feat_dic[features[i][j][0]] +=", "tf.placeholder( # tf.float32, [None], name=\"dnn_feat_weights\" # ) # self.dnn_feat_shape =", "@abc.abstractmethod def _convert_data(self, labels, features): pass @abc.abstractmethod def gen_feed_dict(self, data_dict):", "dnn_feat_indices[k][1]), # ) # res = {} # res[\"fm_feat_indices\"] =", "# self.field_cnt = hparams.FIELD_COUNT # self.col_spliter = col_spliter # self.ID_spliter", "xDeepFM. # Iterator will not load the whole data into", "] # res[\"dnn_feat_values\"] = np.asarray(dnn_feat_values, dtype=np.int64)[ # sorted_index # ]", "] # res[\"dnn_feat_shape\"] = np.asarray(dnn_feat_shape, dtype=np.int64) # return res #", "= np.asarray(fm_feat_values, dtype=np.float32) # res[\"fm_feat_shape\"] = np.asarray(fm_feat_shape, dtype=np.int64) # res[\"labels\"]", "\"\"\" # def __init__(self, hparams, graph, col_spliter=\" \", ID_spliter=\"%\"): #", "format based models, such as xDeepFM. # Iterator will not", "operation. # \"\"\" # dim = self.feature_cnt # FIELD_COUNT =", "= tf.placeholder( # tf.int64, [None], name=\"dnn_feat_shape\" # ) # def", "= hparams.FEATURE_COUNT # self.field_cnt = hparams.FIELD_COUNT # self.col_spliter = col_spliter", "added to this graph. # col_spliter (str): column splitter in", "= [] # cnt = 0 # with tf.gfile.GFile(infile, \"r\")", "= self.field_cnt # instance_cnt = len(labels) # fm_feat_indices = []", "range(instance_cnt): # m = len(features[i]) # dnn_feat_dic = {} #", "hparams.FEATURE_COUNT # self.field_cnt = hparams.FIELD_COUNT # self.col_spliter = col_spliter #", "in this file is an instance. # Returns: # obj:", "(str): column splitter in one line. # ID_spliter (str): ID", "# dnn_feat_indices.append( # [ # i * FIELD_COUNT + features[i][j][0],", "# cnt = 0 # if cnt > 0: #", "k: (dnn_feat_indices[k][0], dnn_feat_indices[k][1]), # ) # res = {} #", "the MIT License. import numpy as np # import tensorflow", "dtype=np.int64) # res[\"fm_feat_values\"] = np.asarray(fm_feat_values, dtype=np.float32) # res[\"fm_feat_shape\"] = np.asarray(fm_feat_shape,", "in the format of graph feed_dict. # \"\"\" # label_list", "range(len(dnn_feat_indices)), # key=lambda k: (dnn_feat_indices[k][0], dnn_feat_indices[k][1]), # ) # res", "\"\"\"Construct a dictionary that maps graph elements to values. 
#", "FFMTextIterator(BaseIterator): # \"\"\"Data loader for FFM format based models, such", "# def parser_one_line(self, line): # \"\"\"Parse one string line into", "# res[\"fm_feat_shape\"] = np.asarray(fm_feat_shape, dtype=np.int64) # res[\"labels\"] = np.asarray([[label] for", "dnn_feat_shape[1] = dnn_feat_dic[features[i][j][0]] # dnn_feat_shape[1] += 1 # sorted_index =", "def parser_one_line(self, line): # \"\"\"Parse one string line into feature", "= 0 # with tf.gfile.GFile(infile, \"r\") as rd: # for", "self.feature_cnt = hparams.FEATURE_COUNT # self.field_cnt = hparams.FIELD_COUNT # self.col_spliter =", "# self.ID_spliter = ID_spliter # self.batch_size = hparams.batch_size # self.graph", "sorted_index # ] # res[\"dnn_feat_weights\"] = np.asarray(dnn_feat_weights, dtype=np.float32)[ # sorted_index", "instance # Returns: # list: Parsed results,including label, features and", "tf.int64, [None, 2], name=\"fm_feat_indices\" # ) # self.fm_feat_values = tf.placeholder(", "label_list.append(label) # impression_id_list.append(impression_id) # cnt += 1 # if cnt", "_convert_data(self, labels, features): pass @abc.abstractmethod def gen_feed_dict(self, data_dict): pass #", "# self.dnn_feat_shape = tf.placeholder( # tf.int64, [None], name=\"dnn_feat_shape\" # )", "[field_idx, feature_idx, feature_value] tuple. # Returns: # dict: A dictionary,", "= tf.placeholder( # tf.float32, [None], name=\"dnn_feat_weights\" # ) # self.dnn_feat_shape", "name=\"dnn_feat_shape\" # ) # def parser_one_line(self, line): # \"\"\"Parse one", "# cnt += 1 # if cnt == self.batch_size: #", "# hparams (obj): Global hyper-parameters. Some key settings such as", "ground-truth labels. # features (list): a 3-dimensional list, carrying a", "# yield self.gen_feed_dict(res), impression_id_list, self.batch_size # label_list = [] #", "one string line into feature values. # Args: # line", "else: # dnn_feat_dic[features[i][j][0]] += 1 # dnn_feat_indices.append( # [ #", "# continue # tokens = word.split(\":\") # features.append([int(tokens[0]) - 1,", "1 # dnn_feat_indices.append( # [ # i * FIELD_COUNT +", "# return res # def gen_feed_dict(self, data_dict): # \"\"\"Construct a", "dictionary that maps graph elements to values. # Args: #", "# res = {} # res[\"fm_feat_indices\"] = np.asarray(fm_feat_indices, dtype=np.int64) #", "convenient for further operation. # \"\"\" # dim = self.feature_cnt", "an iterator. Create necessary placeholders for the model. # Args:", "graph feed_dict. # \"\"\" # label_list = [] # features_list", "key=lambda k: (dnn_feat_indices[k][0], dnn_feat_indices[k][1]), # ) # res = {}", "tokens = word.split(\":\") # features.append([int(tokens[0]) - 1, int(tokens[1]) - 1,", "self.dnn_feat_indices: data_dict[\"dnn_feat_indices\"], # self.dnn_feat_values: data_dict[\"dnn_feat_values\"], # self.dnn_feat_weights: data_dict[\"dnn_feat_weights\"], # self.dnn_feat_shape:", "{} # for j in range(m): # fm_feat_indices.append([i, features[i][j][1]]) #", "[instance_cnt, dim] # dnn_feat_indices = [] # dnn_feat_values = []", "All created placeholder will be added to this graph. #", "necessary placeholders for the model. # Args: # hparams (obj):", "Some key settings such as #_feature and #_field are there.", "# fm_feat_values = [] # fm_feat_shape = [instance_cnt, dim] #", "with tf.gfile.GFile(infile, \"r\") as rd: # for line in rd:", "#_field are there. # graph (obj): the running graph. All", "to values. 
# Args: # data_dict (dict): a dictionary that", "[] # dnn_feat_shape = [instance_cnt * FIELD_COUNT, -1] # for", "large files can be used as input data. # \"\"\"", "= tf.placeholder(tf.int64, [None], name=\"fm_feat_shape\") # self.dnn_feat_indices = tf.placeholder( # tf.int64,", "def gen_feed_dict(self, data_dict): pass # class FFMTextIterator(BaseIterator): # \"\"\"Data loader", "# def gen_feed_dict(self, data_dict): # \"\"\"Construct a dictionary that maps", "= graph # with self.graph.as_default(): # self.labels = tf.placeholder(tf.float32, [None,", "tensorflow as tf import abc class BaseIterator(object): @abc.abstractmethod def parser_one_line(self,", "a 3-dimensional list, carrying a list (batch_size) of feature array,", "into numpy arrays that are good for further operation. #", "dnn_feat_shape[1] += 1 # sorted_index = sorted( # range(len(dnn_feat_indices)), #", "# sorted_index # ] # res[\"dnn_feat_shape\"] = np.asarray(dnn_feat_shape, dtype=np.int64) #", "loader for FFM format based models, such as xDeepFM. #", "j in range(m): # fm_feat_indices.append([i, features[i][j][1]]) # fm_feat_values.append(features[i][j][2]) # if", "# res[\"dnn_feat_indices\"] = np.asarray(dnn_feat_indices, dtype=np.int64)[ # sorted_index # ] #", "= len(labels) # fm_feat_indices = [] # fm_feat_values = []", "# with self.graph.as_default(): # self.labels = tf.placeholder(tf.float32, [None, 1], name=\"label\")", "values. # Args: # line (str): a string indicating one", "= [] # dnn_feat_shape = [instance_cnt * FIELD_COUNT, -1] #", "== 2: # impression_id = words[1].strip() # cols = words[0].strip().split(self.col_spliter)", "cnt += 1 # if cnt == self.batch_size: # res", "features[i][j][0], # dnn_feat_dic[features[i][j][0]], # ] # ) # dnn_feat_values.append(features[i][j][1]) #", "np # import tensorflow as tf import abc class BaseIterator(object):", "tuple. # Returns: # dict: A dictionary, contains multiple numpy", "impression_id_list, self.batch_size # label_list = [] # features_list = []", "tf.gfile.GFile(infile, \"r\") as rd: # for line in rd: #", "{} # res[\"fm_feat_indices\"] = np.asarray(fm_feat_indices, dtype=np.int64) # res[\"fm_feat_values\"] = np.asarray(fm_feat_values,", "res # def gen_feed_dict(self, data_dict): # \"\"\"Construct a dictionary that", "# cols = words[0].strip().split(self.col_spliter) # label = float(cols[0]) # features", "feature_value] tuple. # Returns: # dict: A dictionary, contains multiple", "[None], name=\"dnn_feat_values\" # ) # self.dnn_feat_weights = tf.placeholder( # tf.float32,", "data from a file. # Args: # infile (str): text", "\"\"\" # impression_id = 0 # words = line.strip().split(self.ID_spliter) #", "np.asarray(fm_feat_indices, dtype=np.int64) # res[\"fm_feat_values\"] = np.asarray(fm_feat_values, dtype=np.float32) # res[\"fm_feat_shape\"] =", "indicating one instance # Returns: # list: Parsed results,including label,", "[] # dnn_feat_weights = [] # dnn_feat_shape = [instance_cnt *", "* FIELD_COUNT + features[i][j][0], # dnn_feat_dic[features[i][j][0]], # ] # )", "# res[\"fm_feat_values\"] = np.asarray(fm_feat_values, dtype=np.float32) # res[\"fm_feat_shape\"] = np.asarray(fm_feat_shape, dtype=np.int64)", "# tf.int64, [None], name=\"dnn_feat_values\" # ) # self.dnn_feat_weights = tf.placeholder(", "be added to this graph. # col_spliter (str): column splitter", "an instance. # Returns: # obj: An iterator that will", "of graph feed_dict. # \"\"\" # label_list = [] #", "be used as input data. # \"\"\" # def __init__(self,", "\"\"\"Parse one string line into feature values. 
# Args: #", "# infile (str): text input file. Each line in this", "graph (obj): the running graph. All created placeholder will be", "= [] # fm_feat_values = [] # fm_feat_shape = [instance_cnt,", "data_dict (dict): a dictionary that maps string name to numpy", "list of ground-truth labels. # features (list): a 3-dimensional list,", "used as input data. # \"\"\" # def __init__(self, hparams,", ") # self.fm_feat_values = tf.placeholder( # tf.float32, [None], name=\"fm_feat_values\" #", "a dictionary that maps graph elements to numpy arrays. #", "= np.asarray(dnn_feat_values, dtype=np.int64)[ # sorted_index # ] # res[\"dnn_feat_weights\"] =", "in one line. # ID_spliter (str): ID splitter in one", "res = self._convert_data(label_list, features_list) # yield self.gen_feed_dict(res), impression_id_list, self.batch_size #", "@abc.abstractmethod def load_data_from_file(self, infile): pass @abc.abstractmethod def _convert_data(self, labels, features):", "memory # per mini-batch, so that large files can be", "continue # tokens = word.split(\":\") # features.append([int(tokens[0]) - 1, int(tokens[1])", "+= 1 # dnn_feat_indices.append( # [ # i * FIELD_COUNT", "Returns: # dict: a dictionary that maps graph elements to", "not load the whole data into memory. Instead, it loads", "# dict: A dictionary, contains multiple numpy arrays that are", "for further operation. # Args: # labels (list): a list", "ID splitter in one line. # \"\"\" # self.feature_cnt =", "= col_spliter # self.ID_spliter = ID_spliter # self.batch_size = hparams.batch_size", "self.labels = tf.placeholder(tf.float32, [None, 1], name=\"label\") # self.fm_feat_indices = tf.placeholder(", "def gen_feed_dict(self, data_dict): # \"\"\"Construct a dictionary that maps graph", "maps graph elements to values. # Args: # data_dict (dict):", "name=\"fm_feat_indices\" # ) # self.fm_feat_values = tf.placeholder( # tf.float32, [None],", "obj: An iterator that will yields parsed results, in the", "ID_spliter # self.batch_size = hparams.batch_size # self.graph = graph #", "model. # Args: # hparams (obj): Global hyper-parameters. Some key", "that maps graph elements to values. # Args: # data_dict", "# tf.float32, [None], name=\"dnn_feat_weights\" # ) # self.dnn_feat_shape = tf.placeholder(", "rd: # label, features, impression_id = self.parser_one_line(line) # features_list.append(features) #", "one instance # Returns: # list: Parsed results,including label, features", "# dnn_feat_dic = {} # for j in range(m): #", "features_list.append(features) # label_list.append(label) # impression_id_list.append(impression_id) # cnt += 1 #", "def parser_one_line(self, line): pass @abc.abstractmethod def load_data_from_file(self, infile): pass @abc.abstractmethod", "# dnn_feat_values = [] # dnn_feat_weights = [] # dnn_feat_shape", "import abc class BaseIterator(object): @abc.abstractmethod def parser_one_line(self, line): pass @abc.abstractmethod", "\"\"\"Initialize an iterator. Create necessary placeholders for the model. 
#", "dnn_feat_dic[features[i][j][0]] += 1 # dnn_feat_indices.append( # [ # i *", "data_dict[\"fm_feat_values\"], # self.fm_feat_shape: data_dict[\"fm_feat_shape\"], # self.dnn_feat_indices: data_dict[\"dnn_feat_indices\"], # self.dnn_feat_values: data_dict[\"dnn_feat_values\"],", "# def load_data_from_file(self, infile): # \"\"\"Read and parse data from", "# list: Parsed results,including label, features and impression_id # \"\"\"", "2: # impression_id = words[1].strip() # cols = words[0].strip().split(self.col_spliter) #", "Parsed results,including label, features and impression_id # \"\"\" # impression_id", "that large files can be used as input data. #", "# i * FIELD_COUNT + features[i][j][0], # dnn_feat_dic[features[i][j][0]], # ]", "cnt # def _convert_data(self, labels, features): # \"\"\"Convert data into", "# sorted_index # ] # res[\"dnn_feat_weights\"] = np.asarray(dnn_feat_weights, dtype=np.float32)[ #", "load the whole data into memory. Instead, it loads data", "key settings such as #_feature and #_field are there. #", "# \"\"\"Data loader for FFM format based models, such as", "np.asarray([[label] for label in labels], dtype=np.float32) # res[\"dnn_feat_indices\"] = np.asarray(dnn_feat_indices,", "self._convert_data(label_list, features_list) # yield self.gen_feed_dict(res), impression_id_list, cnt # def _convert_data(self,", "that maps string name to numpy arrays. # Returns: #", "-1] # for i in range(instance_cnt): # m = len(features[i])", "features[i][j][1]]) # fm_feat_values.append(features[i][j][2]) # if features[i][j][0] not in dnn_feat_dic: #", "# tf.int64, [None, 2], name=\"dnn_feat_indices\" # ) # self.dnn_feat_values =", "sorted_index # ] # res[\"dnn_feat_values\"] = np.asarray(dnn_feat_values, dtype=np.int64)[ # sorted_index", "self.field_cnt # instance_cnt = len(labels) # fm_feat_indices = [] #", "rights reserved. # Licensed under the MIT License. import numpy", "impression_id # def load_data_from_file(self, infile): # \"\"\"Read and parse data", "+= 1 # if cnt == self.batch_size: # res =", "\"\"\" # self.feature_cnt = hparams.FEATURE_COUNT # self.field_cnt = hparams.FIELD_COUNT #", "a dictionary that maps graph elements to values. # Args:", "cnt > 0: # res = self._convert_data(label_list, features_list) # yield", "dtype=np.float32) # res[\"fm_feat_shape\"] = np.asarray(fm_feat_shape, dtype=np.int64) # res[\"labels\"] = np.asarray([[label]", "tf.int64, [None, 2], name=\"dnn_feat_indices\" # ) # self.dnn_feat_values = tf.placeholder(", "feed_dict. # \"\"\" # label_list = [] # features_list =", "# self.dnn_feat_weights = tf.placeholder( # tf.float32, [None], name=\"dnn_feat_weights\" # )", "label, features, impression_id # def load_data_from_file(self, infile): # \"\"\"Read and", "class FFMTextIterator(BaseIterator): # \"\"\"Data loader for FFM format based models,", "instance. # Returns: # obj: An iterator that will yields", "self._convert_data(label_list, features_list) # yield self.gen_feed_dict(res), impression_id_list, self.batch_size # label_list =", "# Args: # hparams (obj): Global hyper-parameters. Some key settings", "self.fm_feat_values: data_dict[\"fm_feat_values\"], # self.fm_feat_shape: data_dict[\"fm_feat_shape\"], # self.dnn_feat_indices: data_dict[\"dnn_feat_indices\"], # self.dnn_feat_values:", "# impression_id_list = [] # cnt = 0 # if", "# graph (obj): the running graph. All created placeholder will", "\", ID_spliter=\"%\"): # \"\"\"Initialize an iterator. Create necessary placeholders for", "splitter in one line. 
# \"\"\" # self.feature_cnt = hparams.FEATURE_COUNT", "self.batch_size = hparams.batch_size # self.graph = graph # with self.graph.as_default():", "of [field_idx, feature_idx, feature_value] tuple. # Returns: # dict: A", "# yield self.gen_feed_dict(res), impression_id_list, cnt # def _convert_data(self, labels, features):", "res[\"fm_feat_indices\"] = np.asarray(fm_feat_indices, dtype=np.int64) # res[\"fm_feat_values\"] = np.asarray(fm_feat_values, dtype=np.float32) #", "as input data. # \"\"\" # def __init__(self, hparams, graph,", "# sorted_index = sorted( # range(len(dnn_feat_indices)), # key=lambda k: (dnn_feat_indices[k][0],", "features_list) # yield self.gen_feed_dict(res), impression_id_list, self.batch_size # label_list = []", "# self.col_spliter = col_spliter # self.ID_spliter = ID_spliter # self.batch_size", "if cnt == self.batch_size: # res = self._convert_data(label_list, features_list) #", "# fm_feat_shape = [instance_cnt, dim] # dnn_feat_indices = [] #", "License. import numpy as np # import tensorflow as tf", "# self.dnn_feat_indices = tf.placeholder( # tf.int64, [None, 2], name=\"dnn_feat_indices\" #", "a string indicating one instance # Returns: # list: Parsed", "is an instance. # Returns: # obj: An iterator that", "fm_feat_indices.append([i, features[i][j][1]]) # fm_feat_values.append(features[i][j][2]) # if features[i][j][0] not in dnn_feat_dic:", "dnn_feat_weights = [] # dnn_feat_shape = [instance_cnt * FIELD_COUNT, -1]", "files can be used as input data. # \"\"\" #", "# dnn_feat_weights = [] # dnn_feat_shape = [instance_cnt * FIELD_COUNT,", "= self._convert_data(label_list, features_list) # yield self.gen_feed_dict(res), impression_id_list, cnt # def", "# \"\"\"Construct a dictionary that maps graph elements to values.", "tf.int64, [None], name=\"dnn_feat_values\" # ) # self.dnn_feat_weights = tf.placeholder( #", "that are good for further operation. # Args: # labels", "dict: A dictionary, contains multiple numpy arrays that are convenient", "[] # cnt = 0 # with tf.gfile.GFile(infile, \"r\") as", "dnn_feat_shape[1] < dnn_feat_dic[features[i][j][0]]: # dnn_feat_shape[1] = dnn_feat_dic[features[i][j][0]] # dnn_feat_shape[1] +=", "a dictionary that maps string name to numpy arrays. #", "cols = words[0].strip().split(self.col_spliter) # label = float(cols[0]) # features =", "per mini-batch, so that large files can be used as", "\"\"\" # feed_dict = { # self.labels: data_dict[\"labels\"], # self.fm_feat_indices:", "= words[0].strip().split(self.col_spliter) # label = float(cols[0]) # features = []", "# per mini-batch, so that large files can be used", "dnn_feat_values = [] # dnn_feat_weights = [] # dnn_feat_shape =", "name=\"dnn_feat_indices\" # ) # self.dnn_feat_values = tf.placeholder( # tf.int64, [None],", "this file is an instance. # Returns: # obj: An", "1 # if cnt == self.batch_size: # res = self._convert_data(label_list,", "# if features[i][j][0] not in dnn_feat_dic: # dnn_feat_dic[features[i][j][0]] = 0", "[None], name=\"dnn_feat_shape\" # ) # def parser_one_line(self, line): # \"\"\"Parse", "dnn_feat_dic[features[i][j][0]], # ] # ) # dnn_feat_values.append(features[i][j][1]) # dnn_feat_weights.append(features[i][j][2]) #", "(c) Microsoft Corporation. All rights reserved. # Licensed under the", "that maps graph elements to numpy arrays. # \"\"\" #", "text input file. 
Each line in this file is an", "= ID_spliter # self.batch_size = hparams.batch_size # self.graph = graph", "# if dnn_feat_shape[1] < dnn_feat_dic[features[i][j][0]]: # dnn_feat_shape[1] = dnn_feat_dic[features[i][j][0]] #", "into memory # per mini-batch, so that large files can", "= tf.placeholder( # tf.int64, [None, 2], name=\"dnn_feat_indices\" # ) #", "= self.feature_cnt # FIELD_COUNT = self.field_cnt # instance_cnt = len(labels)", "np.asarray(dnn_feat_values, dtype=np.int64)[ # sorted_index # ] # res[\"dnn_feat_weights\"] = np.asarray(dnn_feat_weights,", "features[i][j][0] not in dnn_feat_dic: # dnn_feat_dic[features[i][j][0]] = 0 # else:", "name=\"fm_feat_shape\") # self.dnn_feat_indices = tf.placeholder( # tf.int64, [None, 2], name=\"dnn_feat_indices\"", "tf.placeholder(tf.float32, [None, 1], name=\"label\") # self.fm_feat_indices = tf.placeholder( # tf.int64,", "= 0 # else: # dnn_feat_dic[features[i][j][0]] += 1 # dnn_feat_indices.append(", "import tensorflow as tf import abc class BaseIterator(object): @abc.abstractmethod def", "def _convert_data(self, labels, features): pass @abc.abstractmethod def gen_feed_dict(self, data_dict): pass", "features and impression_id # \"\"\" # impression_id = 0 #", "string line into feature values. # Args: # line (str):", "# ) # dnn_feat_values.append(features[i][j][1]) # dnn_feat_weights.append(features[i][j][2]) # if dnn_feat_shape[1] <", "models, such as xDeepFM. # Iterator will not load the", "such as #_feature and #_field are there. # graph (obj):", "if dnn_feat_shape[1] < dnn_feat_dic[features[i][j][0]]: # dnn_feat_shape[1] = dnn_feat_dic[features[i][j][0]] # dnn_feat_shape[1]", "data_dict[\"fm_feat_shape\"], # self.dnn_feat_indices: data_dict[\"dnn_feat_indices\"], # self.dnn_feat_values: data_dict[\"dnn_feat_values\"], # self.dnn_feat_weights: data_dict[\"dnn_feat_weights\"],", "line in this file is an instance. # Returns: #", "# if cnt > 0: # res = self._convert_data(label_list, features_list)", "res = self._convert_data(label_list, features_list) # yield self.gen_feed_dict(res), impression_id_list, cnt #", "each feature array is a list of [field_idx, feature_idx, feature_value]", "# \"\"\"Convert data into numpy arrays that are good for", "settings such as #_feature and #_field are there. # graph", "= hparams.FIELD_COUNT # self.col_spliter = col_spliter # self.ID_spliter = ID_spliter", "the format of graph feed_dict. # \"\"\" # label_list =", "# fm_feat_values.append(features[i][j][2]) # if features[i][j][0] not in dnn_feat_dic: # dnn_feat_dic[features[i][j][0]]", "line in rd: # label, features, impression_id = self.parser_one_line(line) #", "for label in labels], dtype=np.float32) # res[\"dnn_feat_indices\"] = np.asarray(dnn_feat_indices, dtype=np.int64)[", "[] # fm_feat_values = [] # fm_feat_shape = [instance_cnt, dim]", "class BaseIterator(object): @abc.abstractmethod def parser_one_line(self, line): pass @abc.abstractmethod def load_data_from_file(self,", "can be used as input data. # \"\"\" # def", "in one line. 
# \"\"\" # self.feature_cnt = hparams.FEATURE_COUNT #", "# dnn_feat_shape = [instance_cnt * FIELD_COUNT, -1] # for i", "i in range(instance_cnt): # m = len(features[i]) # dnn_feat_dic =", "= {} # for j in range(m): # fm_feat_indices.append([i, features[i][j][1]])", "i * FIELD_COUNT + features[i][j][0], # dnn_feat_dic[features[i][j][0]], # ] #", "impression_id_list.append(impression_id) # cnt += 1 # if cnt == self.batch_size:", "tf.float32, [None], name=\"dnn_feat_weights\" # ) # self.dnn_feat_shape = tf.placeholder( #", "self.fm_feat_indices = tf.placeholder( # tf.int64, [None, 2], name=\"fm_feat_indices\" # )", "words = line.strip().split(self.ID_spliter) # if len(words) == 2: # impression_id", "__init__(self, hparams, graph, col_spliter=\" \", ID_spliter=\"%\"): # \"\"\"Initialize an iterator.", "yield self.gen_feed_dict(res), impression_id_list, cnt # def _convert_data(self, labels, features): #", "label, features, impression_id = self.parser_one_line(line) # features_list.append(features) # label_list.append(label) #", "tf.placeholder( # tf.float32, [None], name=\"fm_feat_values\" # ) # self.fm_feat_shape =", "yield self.gen_feed_dict(res), impression_id_list, self.batch_size # label_list = [] # features_list", "= dnn_feat_dic[features[i][j][0]] # dnn_feat_shape[1] += 1 # sorted_index = sorted(", "(str): ID splitter in one line. # \"\"\" # self.feature_cnt", "= [] # cnt = 0 # if cnt >", "from a file. # Args: # infile (str): text input", "1 # sorted_index = sorted( # range(len(dnn_feat_indices)), # key=lambda k:", "self.field_cnt = hparams.FIELD_COUNT # self.col_spliter = col_spliter # self.ID_spliter =", "is a list of [field_idx, feature_idx, feature_value] tuple. # Returns:", "the whole data into memory. Instead, it loads data into", "dim] # dnn_feat_indices = [] # dnn_feat_values = [] #", "that are convenient for further operation. # \"\"\" # dim", "to this graph. # col_spliter (str): column splitter in one", "features_list) # yield self.gen_feed_dict(res), impression_id_list, cnt # def _convert_data(self, labels,", "# label_list.append(label) # impression_id_list.append(impression_id) # cnt += 1 # if", "range(m): # fm_feat_indices.append([i, features[i][j][1]]) # fm_feat_values.append(features[i][j][2]) # if features[i][j][0] not", "+= 1 # sorted_index = sorted( # range(len(dnn_feat_indices)), # key=lambda", "= word.split(\":\") # features.append([int(tokens[0]) - 1, int(tokens[1]) - 1, float(tokens[2])])", "Returns: # list: Parsed results,including label, features and impression_id #", ") # self.dnn_feat_shape = tf.placeholder( # tf.int64, [None], name=\"dnn_feat_shape\" #", "dnn_feat_dic[features[i][j][0]] # dnn_feat_shape[1] += 1 # sorted_index = sorted( #", "Args: # labels (list): a list of ground-truth labels. #", "# Returns: # dict: a dictionary that maps graph elements", "numpy arrays that are good for further operation. # Args:", "# dnn_feat_weights.append(features[i][j][2]) # if dnn_feat_shape[1] < dnn_feat_dic[features[i][j][0]]: # dnn_feat_shape[1] =", "data_dict[\"dnn_feat_values\"], # self.dnn_feat_weights: data_dict[\"dnn_feat_weights\"], # self.dnn_feat_shape: data_dict[\"dnn_feat_shape\"], # } #", "len(labels) # fm_feat_indices = [] # fm_feat_values = [] #", "# ) # res = {} # res[\"fm_feat_indices\"] = np.asarray(fm_feat_indices,", "labels (list): a list of ground-truth labels. 
# features (list):", "dict: a dictionary that maps graph elements to numpy arrays.", "[None, 2], name=\"fm_feat_indices\" # ) # self.fm_feat_values = tf.placeholder( #", "# label, features, impression_id = self.parser_one_line(line) # features_list.append(features) # label_list.append(label)", "def load_data_from_file(self, infile): # \"\"\"Read and parse data from a", "contains multiple numpy arrays that are convenient for further operation.", "operation. # Args: # labels (list): a list of ground-truth", "# return label, features, impression_id # def load_data_from_file(self, infile): #", "self.parser_one_line(line) # features_list.append(features) # label_list.append(label) # impression_id_list.append(impression_id) # cnt +=", "dnn_feat_dic[features[i][j][0]] = 0 # else: # dnn_feat_dic[features[i][j][0]] += 1 #", "[ # i * FIELD_COUNT + features[i][j][0], # dnn_feat_dic[features[i][j][0]], #", "= hparams.batch_size # self.graph = graph # with self.graph.as_default(): #", "line): pass @abc.abstractmethod def load_data_from_file(self, infile): pass @abc.abstractmethod def _convert_data(self,", "gen_feed_dict(self, data_dict): pass # class FFMTextIterator(BaseIterator): # \"\"\"Data loader for", "# label_list = [] # features_list = [] # impression_id_list", "elements to numpy arrays. # \"\"\" # feed_dict = {", "BaseIterator(object): @abc.abstractmethod def parser_one_line(self, line): pass @abc.abstractmethod def load_data_from_file(self, infile):", "# dnn_feat_dic[features[i][j][0]], # ] # ) # dnn_feat_values.append(features[i][j][1]) # dnn_feat_weights.append(features[i][j][2])", "# ) # self.dnn_feat_weights = tf.placeholder( # tf.float32, [None], name=\"dnn_feat_weights\"", "data_dict[\"dnn_feat_indices\"], # self.dnn_feat_values: data_dict[\"dnn_feat_values\"], # self.dnn_feat_weights: data_dict[\"dnn_feat_weights\"], # self.dnn_feat_shape: data_dict[\"dnn_feat_shape\"],", "# impression_id = words[1].strip() # cols = words[0].strip().split(self.col_spliter) # label", "import numpy as np # import tensorflow as tf import", "self.dnn_feat_values = tf.placeholder( # tf.int64, [None], name=\"dnn_feat_values\" # ) #", "results, in the format of graph feed_dict. # \"\"\" #", "# ) # def parser_one_line(self, line): # \"\"\"Parse one string", "such as xDeepFM. # Iterator will not load the whole", "so that large files can be used as input data.", "[] # fm_feat_shape = [instance_cnt, dim] # dnn_feat_indices = []", "# dnn_feat_shape[1] += 1 # sorted_index = sorted( # range(len(dnn_feat_indices)),", "numpy arrays that are convenient for further operation. # \"\"\"", "as rd: # for line in rd: # label, features,", "Each line in this file is an instance. # Returns:", "array is a list of [field_idx, feature_idx, feature_value] tuple. #", "further operation. # \"\"\" # dim = self.feature_cnt # FIELD_COUNT", "# sorted_index # ] # res[\"dnn_feat_values\"] = np.asarray(dnn_feat_values, dtype=np.int64)[ #", "# self.fm_feat_values: data_dict[\"fm_feat_values\"], # self.fm_feat_shape: data_dict[\"fm_feat_shape\"], # self.dnn_feat_indices: data_dict[\"dnn_feat_indices\"], #", "self.gen_feed_dict(res), impression_id_list, cnt # def _convert_data(self, labels, features): # \"\"\"Convert", "instance_cnt = len(labels) # fm_feat_indices = [] # fm_feat_values =", "= np.asarray(dnn_feat_indices, dtype=np.int64)[ # sorted_index # ] # res[\"dnn_feat_values\"] =", "will be added to this graph. # col_spliter (str): column", "created placeholder will be added to this graph. 
# col_spliter", "# m = len(features[i]) # dnn_feat_dic = {} # for", "to numpy arrays. # Returns: # dict: a dictionary that", "# tokens = word.split(\":\") # features.append([int(tokens[0]) - 1, int(tokens[1]) -", "# feed_dict = { # self.labels: data_dict[\"labels\"], # self.fm_feat_indices: data_dict[\"fm_feat_indices\"],", "= [] # impression_id_list = [] # cnt = 0", "Args: # infile (str): text input file. Each line in", "dtype=np.int64) # res[\"labels\"] = np.asarray([[label] for label in labels], dtype=np.float32)", "Returns: # obj: An iterator that will yields parsed results,", "= [] # dnn_feat_weights = [] # dnn_feat_shape = [instance_cnt", "# else: # dnn_feat_dic[features[i][j][0]] += 1 # dnn_feat_indices.append( # [", "labels], dtype=np.float32) # res[\"dnn_feat_indices\"] = np.asarray(dnn_feat_indices, dtype=np.int64)[ # sorted_index #", "# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed", "dtype=np.float32)[ # sorted_index # ] # res[\"dnn_feat_shape\"] = np.asarray(dnn_feat_shape, dtype=np.int64)", "# [ # i * FIELD_COUNT + features[i][j][0], # dnn_feat_dic[features[i][j][0]],", "self.dnn_feat_weights: data_dict[\"dnn_feat_weights\"], # self.dnn_feat_shape: data_dict[\"dnn_feat_shape\"], # } # return feed_dict", "# self.dnn_feat_values = tf.placeholder( # tf.int64, [None], name=\"dnn_feat_values\" # )", "yields parsed results, in the format of graph feed_dict. #", "impression_id = self.parser_one_line(line) # features_list.append(features) # label_list.append(label) # impression_id_list.append(impression_id) #", "# features (list): a 3-dimensional list, carrying a list (batch_size)", "= [] # dnn_feat_values = [] # dnn_feat_weights = []", "res[\"dnn_feat_weights\"] = np.asarray(dnn_feat_weights, dtype=np.float32)[ # sorted_index # ] # res[\"dnn_feat_shape\"]", "# label = float(cols[0]) # features = [] # for", "0 # if cnt > 0: # res = self._convert_data(label_list,", "# res[\"fm_feat_indices\"] = np.asarray(fm_feat_indices, dtype=np.int64) # res[\"fm_feat_values\"] = np.asarray(fm_feat_values, dtype=np.float32)", "dim = self.feature_cnt # FIELD_COUNT = self.field_cnt # instance_cnt =", "[] # for word in cols[1:]: # if not word.strip():", "data into memory # per mini-batch, so that large files", "# tf.int64, [None, 2], name=\"fm_feat_indices\" # ) # self.fm_feat_values =", "file. # Args: # infile (str): text input file. Each", "# res = self._convert_data(label_list, features_list) # yield self.gen_feed_dict(res), impression_id_list, self.batch_size", "# self.fm_feat_indices: data_dict[\"fm_feat_indices\"], # self.fm_feat_values: data_dict[\"fm_feat_values\"], # self.fm_feat_shape: data_dict[\"fm_feat_shape\"], #", "pass # class FFMTextIterator(BaseIterator): # \"\"\"Data loader for FFM format", "for line in rd: # label, features, impression_id = self.parser_one_line(line)", "\"\"\"Convert data into numpy arrays that are good for further", "\"\"\"Data loader for FFM format based models, such as xDeepFM.", "in dnn_feat_dic: # dnn_feat_dic[features[i][j][0]] = 0 # else: # dnn_feat_dic[features[i][j][0]]", "# instance_cnt = len(labels) # fm_feat_indices = [] # fm_feat_values", "# if not word.strip(): # continue # tokens = word.split(\":\")", "Corporation. All rights reserved. # Licensed under the MIT License.", "dictionary that maps graph elements to numpy arrays. # \"\"\"", "ID_spliter (str): ID splitter in one line. 
# \"\"\" #", "# FIELD_COUNT = self.field_cnt # instance_cnt = len(labels) # fm_feat_indices", "# Args: # line (str): a string indicating one instance", "# fm_feat_indices.append([i, features[i][j][1]]) # fm_feat_values.append(features[i][j][2]) # if features[i][j][0] not in", "Returns: # dict: A dictionary, contains multiple numpy arrays that", "* FIELD_COUNT, -1] # for i in range(instance_cnt): # m", "@abc.abstractmethod def parser_one_line(self, line): pass @abc.abstractmethod def load_data_from_file(self, infile): pass", "feed_dict = { # self.labels: data_dict[\"labels\"], # self.fm_feat_indices: data_dict[\"fm_feat_indices\"], #", "# import tensorflow as tf import abc class BaseIterator(object): @abc.abstractmethod", "self.col_spliter = col_spliter # self.ID_spliter = ID_spliter # self.batch_size =", "# fm_feat_indices = [] # fm_feat_values = [] # fm_feat_shape", "# res[\"dnn_feat_shape\"] = np.asarray(dnn_feat_shape, dtype=np.int64) # return res # def", "data_dict[\"labels\"], # self.fm_feat_indices: data_dict[\"fm_feat_indices\"], # self.fm_feat_values: data_dict[\"fm_feat_values\"], # self.fm_feat_shape: data_dict[\"fm_feat_shape\"],", ") # dnn_feat_values.append(features[i][j][1]) # dnn_feat_weights.append(features[i][j][2]) # if dnn_feat_shape[1] < dnn_feat_dic[features[i][j][0]]:", "gen_feed_dict(self, data_dict): # \"\"\"Construct a dictionary that maps graph elements", "tf.placeholder(tf.int64, [None], name=\"fm_feat_shape\") # self.dnn_feat_indices = tf.placeholder( # tf.int64, [None,", "res[\"dnn_feat_values\"] = np.asarray(dnn_feat_values, dtype=np.int64)[ # sorted_index # ] # res[\"dnn_feat_weights\"]", "running graph. All created placeholder will be added to this", "Iterator will not load the whole data into memory. Instead,", "hparams (obj): Global hyper-parameters. Some key settings such as #_feature", "1, int(tokens[1]) - 1, float(tokens[2])]) # return label, features, impression_id", "int(tokens[1]) - 1, float(tokens[2])]) # return label, features, impression_id #", "m = len(features[i]) # dnn_feat_dic = {} # for j", "FIELD_COUNT, -1] # for i in range(instance_cnt): # m =", "return label, features, impression_id # def load_data_from_file(self, infile): # \"\"\"Read", "iterator that will yields parsed results, in the format of", "in rd: # label, features, impression_id = self.parser_one_line(line) # features_list.append(features)", "@abc.abstractmethod def gen_feed_dict(self, data_dict): pass # class FFMTextIterator(BaseIterator): # \"\"\"Data", "[None, 1], name=\"label\") # self.fm_feat_indices = tf.placeholder( # tf.int64, [None,", "# self.fm_feat_shape = tf.placeholder(tf.int64, [None], name=\"fm_feat_shape\") # self.dnn_feat_indices = tf.placeholder(", "\"\"\"Read and parse data from a file. # Args: #", "dnn_feat_indices.append( # [ # i * FIELD_COUNT + features[i][j][0], #", "< dnn_feat_dic[features[i][j][0]]: # dnn_feat_shape[1] = dnn_feat_dic[features[i][j][0]] # dnn_feat_shape[1] += 1", "cnt == self.batch_size: # res = self._convert_data(label_list, features_list) # yield", "col_spliter=\" \", ID_spliter=\"%\"): # \"\"\"Initialize an iterator. Create necessary placeholders", "sorted_index = sorted( # range(len(dnn_feat_indices)), # key=lambda k: (dnn_feat_indices[k][0], dnn_feat_indices[k][1]),", "# ] # res[\"dnn_feat_values\"] = np.asarray(dnn_feat_values, dtype=np.int64)[ # sorted_index #", "pass @abc.abstractmethod def _convert_data(self, labels, features): pass @abc.abstractmethod def gen_feed_dict(self,", "elements to values. 
# Args: # data_dict (dict): a dictionary", "# Iterator will not load the whole data into memory.", "file. Each line in this file is an instance. #", "np.asarray(dnn_feat_weights, dtype=np.float32)[ # sorted_index # ] # res[\"dnn_feat_shape\"] = np.asarray(dnn_feat_shape,", "graph elements to values. # Args: # data_dict (dict): a", "FFM format based models, such as xDeepFM. # Iterator will", "A dictionary, contains multiple numpy arrays that are convenient for", "# self.graph = graph # with self.graph.as_default(): # self.labels =", "# for i in range(instance_cnt): # m = len(features[i]) #", "label = float(cols[0]) # features = [] # for word", "of ground-truth labels. # features (list): a 3-dimensional list, carrying", "# self.fm_feat_indices = tf.placeholder( # tf.int64, [None, 2], name=\"fm_feat_indices\" #", "iterator. Create necessary placeholders for the model. # Args: #", "# ) # self.dnn_feat_shape = tf.placeholder( # tf.int64, [None], name=\"dnn_feat_shape\"", "# features_list.append(features) # label_list.append(label) # impression_id_list.append(impression_id) # cnt += 1", "# words = line.strip().split(self.ID_spliter) # if len(words) == 2: #", "good for further operation. # Args: # labels (list): a", "features (list): a 3-dimensional list, carrying a list (batch_size) of", "# features = [] # for word in cols[1:]: #", "dnn_feat_values.append(features[i][j][1]) # dnn_feat_weights.append(features[i][j][2]) # if dnn_feat_shape[1] < dnn_feat_dic[features[i][j][0]]: # dnn_feat_shape[1]", "2], name=\"dnn_feat_indices\" # ) # self.dnn_feat_values = tf.placeholder( # tf.int64,", "= self.parser_one_line(line) # features_list.append(features) # label_list.append(label) # impression_id_list.append(impression_id) # cnt", "float(tokens[2])]) # return label, features, impression_id # def load_data_from_file(self, infile):", "features.append([int(tokens[0]) - 1, int(tokens[1]) - 1, float(tokens[2])]) # return label,", "will not load the whole data into memory. Instead, it", "dnn_feat_shape = [instance_cnt * FIELD_COUNT, -1] # for i in", ") # self.dnn_feat_weights = tf.placeholder( # tf.float32, [None], name=\"dnn_feat_weights\" #", "name to numpy arrays. # Returns: # dict: a dictionary", "= tf.placeholder(tf.float32, [None, 1], name=\"label\") # self.fm_feat_indices = tf.placeholder( #", "res[\"dnn_feat_indices\"] = np.asarray(dnn_feat_indices, dtype=np.int64)[ # sorted_index # ] # res[\"dnn_feat_values\"]", "string name to numpy arrays. # Returns: # dict: a", "infile (str): text input file. Each line in this file", "Microsoft Corporation. All rights reserved. # Licensed under the MIT", "arrays that are good for further operation. # Args: #", "Global hyper-parameters. Some key settings such as #_feature and #_field", "in range(instance_cnt): # m = len(features[i]) # dnn_feat_dic = {}", "dtype=np.float32) # res[\"dnn_feat_indices\"] = np.asarray(dnn_feat_indices, dtype=np.int64)[ # sorted_index # ]", "(str): text input file. Each line in this file is", "if cnt > 0: # res = self._convert_data(label_list, features_list) #", "self.fm_feat_values = tf.placeholder( # tf.float32, [None], name=\"fm_feat_values\" # ) #", "[None], name=\"fm_feat_values\" # ) # self.fm_feat_shape = tf.placeholder(tf.int64, [None], name=\"fm_feat_shape\")", "line. # ID_spliter (str): ID splitter in one line. 
#", "self.dnn_feat_weights = tf.placeholder( # tf.float32, [None], name=\"dnn_feat_weights\" # ) #", "= tf.placeholder( # tf.int64, [None, 2], name=\"fm_feat_indices\" # ) #", "2], name=\"fm_feat_indices\" # ) # self.fm_feat_values = tf.placeholder( # tf.float32,", "rd: # for line in rd: # label, features, impression_id", "# ) # self.dnn_feat_values = tf.placeholder( # tf.int64, [None], name=\"dnn_feat_values\"", "dtype=np.int64)[ # sorted_index # ] # res[\"dnn_feat_values\"] = np.asarray(dnn_feat_values, dtype=np.int64)[", "label, features and impression_id # \"\"\" # impression_id = 0", "Args: # data_dict (dict): a dictionary that maps string name", "features): # \"\"\"Convert data into numpy arrays that are good", "data. # \"\"\" # def __init__(self, hparams, graph, col_spliter=\" \",", "tf.placeholder( # tf.int64, [None, 2], name=\"fm_feat_indices\" # ) # self.fm_feat_values", "Args: # hparams (obj): Global hyper-parameters. Some key settings such", "# if len(words) == 2: # impression_id = words[1].strip() #", "= 0 # if cnt > 0: # res =", "sorted( # range(len(dnn_feat_indices)), # key=lambda k: (dnn_feat_indices[k][0], dnn_feat_indices[k][1]), # )", "based models, such as xDeepFM. # Iterator will not load", "[] # dnn_feat_values = [] # dnn_feat_weights = [] #", "hparams.batch_size # self.graph = graph # with self.graph.as_default(): # self.labels", "are good for further operation. # Args: # labels (list):", "numpy arrays. # \"\"\" # feed_dict = { # self.labels:", "pass @abc.abstractmethod def gen_feed_dict(self, data_dict): pass # class FFMTextIterator(BaseIterator): #", "line (str): a string indicating one instance # Returns: #", "len(words) == 2: # impression_id = words[1].strip() # cols =", "= { # self.labels: data_dict[\"labels\"], # self.fm_feat_indices: data_dict[\"fm_feat_indices\"], # self.fm_feat_values:", "res[\"fm_feat_values\"] = np.asarray(fm_feat_values, dtype=np.float32) # res[\"fm_feat_shape\"] = np.asarray(fm_feat_shape, dtype=np.int64) #", "= [] # features_list = [] # impression_id_list = []", "labels, features): # \"\"\"Convert data into numpy arrays that are", "name=\"dnn_feat_values\" # ) # self.dnn_feat_weights = tf.placeholder( # tf.float32, [None],", "# \"\"\"Parse one string line into feature values. # Args:", "Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under", "def load_data_from_file(self, infile): pass @abc.abstractmethod def _convert_data(self, labels, features): pass", "graph # with self.graph.as_default(): # self.labels = tf.placeholder(tf.float32, [None, 1],", "hparams, graph, col_spliter=\" \", ID_spliter=\"%\"): # \"\"\"Initialize an iterator. Create", "a list of [field_idx, feature_idx, feature_value] tuple. # Returns: #", "# key=lambda k: (dnn_feat_indices[k][0], dnn_feat_indices[k][1]), # ) # res =", "data_dict): # \"\"\"Construct a dictionary that maps graph elements to", "to numpy arrays. # \"\"\" # feed_dict = { #", "the running graph. All created placeholder will be added to", "reserved. # Licensed under the MIT License. import numpy as", "np.asarray(fm_feat_values, dtype=np.float32) # res[\"fm_feat_shape\"] = np.asarray(fm_feat_shape, dtype=np.int64) # res[\"labels\"] =", "# res[\"dnn_feat_weights\"] = np.asarray(dnn_feat_weights, dtype=np.float32)[ # sorted_index # ] #", "cnt = 0 # with tf.gfile.GFile(infile, \"r\") as rd: #", ") # def parser_one_line(self, line): # \"\"\"Parse one string line", "(list): a list of ground-truth labels. # features (list): a", "# \"\"\"Initialize an iterator. 
Create necessary placeholders for the model.", "data into memory. Instead, it loads data into memory #", "impression_id = 0 # words = line.strip().split(self.ID_spliter) # if len(words)", "# res = self._convert_data(label_list, features_list) # yield self.gen_feed_dict(res), impression_id_list, cnt", "load_data_from_file(self, infile): pass @abc.abstractmethod def _convert_data(self, labels, features): pass @abc.abstractmethod", "Instead, it loads data into memory # per mini-batch, so", "(dnn_feat_indices[k][0], dnn_feat_indices[k][1]), # ) # res = {} # res[\"fm_feat_indices\"]", "float(cols[0]) # features = [] # for word in cols[1:]:", "dtype=np.int64)[ # sorted_index # ] # res[\"dnn_feat_weights\"] = np.asarray(dnn_feat_weights, dtype=np.float32)[", "values. # Args: # data_dict (dict): a dictionary that maps", ") # self.dnn_feat_values = tf.placeholder( # tf.int64, [None], name=\"dnn_feat_values\" #", "0 # else: # dnn_feat_dic[features[i][j][0]] += 1 # dnn_feat_indices.append( #", "and parse data from a file. # Args: # infile", "0: # res = self._convert_data(label_list, features_list) # yield self.gen_feed_dict(res), impression_id_list,", "for FFM format based models, such as xDeepFM. # Iterator", "line. # \"\"\" # self.feature_cnt = hparams.FEATURE_COUNT # self.field_cnt =", "are there. # graph (obj): the running graph. All created", "# \"\"\" # impression_id = 0 # words = line.strip().split(self.ID_spliter)", "dnn_feat_dic[features[i][j][0]]: # dnn_feat_shape[1] = dnn_feat_dic[features[i][j][0]] # dnn_feat_shape[1] += 1 #", "with self.graph.as_default(): # self.labels = tf.placeholder(tf.float32, [None, 1], name=\"label\") #", "res[\"labels\"] = np.asarray([[label] for label in labels], dtype=np.float32) # res[\"dnn_feat_indices\"]", "- 1, float(tokens[2])]) # return label, features, impression_id # def", "(batch_size) of feature array, # where each feature array is", "pass @abc.abstractmethod def load_data_from_file(self, infile): pass @abc.abstractmethod def _convert_data(self, labels,", "fm_feat_shape = [instance_cnt, dim] # dnn_feat_indices = [] # dnn_feat_values", "# dnn_feat_shape[1] = dnn_feat_dic[features[i][j][0]] # dnn_feat_shape[1] += 1 # sorted_index", "memory. Instead, it loads data into memory # per mini-batch,", "self.batch_size # label_list = [] # features_list = [] #", "results,including label, features and impression_id # \"\"\" # impression_id =", "if len(words) == 2: # impression_id = words[1].strip() # cols", "parse data from a file. # Args: # infile (str):", "self.graph.as_default(): # self.labels = tf.placeholder(tf.float32, [None, 1], name=\"label\") # self.fm_feat_indices", "word.split(\":\") # features.append([int(tokens[0]) - 1, int(tokens[1]) - 1, float(tokens[2])]) #", "# dnn_feat_dic[features[i][j][0]] += 1 # dnn_feat_indices.append( # [ # i", "# for line in rd: # label, features, impression_id =", "len(features[i]) # dnn_feat_dic = {} # for j in range(m):", "+ features[i][j][0], # dnn_feat_dic[features[i][j][0]], # ] # ) # dnn_feat_values.append(features[i][j][1])", "(obj): Global hyper-parameters. 
Some key settings such as #_feature and", "= line.strip().split(self.ID_spliter) # if len(words) == 2: # impression_id =", "name=\"fm_feat_values\" # ) # self.fm_feat_shape = tf.placeholder(tf.int64, [None], name=\"fm_feat_shape\") #", "== self.batch_size: # res = self._convert_data(label_list, features_list) # yield self.gen_feed_dict(res),", "= {} # res[\"fm_feat_indices\"] = np.asarray(fm_feat_indices, dtype=np.int64) # res[\"fm_feat_values\"] =", "# \"\"\" # dim = self.feature_cnt # FIELD_COUNT = self.field_cnt", "= [instance_cnt * FIELD_COUNT, -1] # for i in range(instance_cnt):", "# tf.float32, [None], name=\"fm_feat_values\" # ) # self.fm_feat_shape = tf.placeholder(tf.int64,", "dictionary that maps string name to numpy arrays. # Returns:", "= len(features[i]) # dnn_feat_dic = {} # for j in", "not in dnn_feat_dic: # dnn_feat_dic[features[i][j][0]] = 0 # else: #", "= sorted( # range(len(dnn_feat_indices)), # key=lambda k: (dnn_feat_indices[k][0], dnn_feat_indices[k][1]), #", "maps string name to numpy arrays. # Returns: # dict:", "fm_feat_values.append(features[i][j][2]) # if features[i][j][0] not in dnn_feat_dic: # dnn_feat_dic[features[i][j][0]] =", "[] # features_list = [] # impression_id_list = [] #", "graph, col_spliter=\" \", ID_spliter=\"%\"): # \"\"\"Initialize an iterator. Create necessary", "label in labels], dtype=np.float32) # res[\"dnn_feat_indices\"] = np.asarray(dnn_feat_indices, dtype=np.int64)[ #", "numpy as np # import tensorflow as tf import abc", "# self.dnn_feat_values: data_dict[\"dnn_feat_values\"], # self.dnn_feat_weights: data_dict[\"dnn_feat_weights\"], # self.dnn_feat_shape: data_dict[\"dnn_feat_shape\"], #", "(str): a string indicating one instance # Returns: # list:", "impression_id_list = [] # cnt = 0 # if cnt", "1, float(tokens[2])]) # return label, features, impression_id # def load_data_from_file(self,", "cnt = 0 # if cnt > 0: # res", "of feature array, # where each feature array is a", "<reponame>yutian-zhao/recommenders # Copyright (c) Microsoft Corporation. All rights reserved. #", "# ] # res[\"dnn_feat_shape\"] = np.asarray(dnn_feat_shape, dtype=np.int64) # return res", "# Returns: # list: Parsed results,including label, features and impression_id", "# class FFMTextIterator(BaseIterator): # \"\"\"Data loader for FFM format based", "dnn_feat_weights.append(features[i][j][2]) # if dnn_feat_shape[1] < dnn_feat_dic[features[i][j][0]]: # dnn_feat_shape[1] = dnn_feat_dic[features[i][j][0]]", "parsed results, in the format of graph feed_dict. # \"\"\"", "labels. # features (list): a 3-dimensional list, carrying a list", "self.feature_cnt # FIELD_COUNT = self.field_cnt # instance_cnt = len(labels) #", "if not word.strip(): # continue # tokens = word.split(\":\") #", "feature array, # where each feature array is a list", "# \"\"\" # def __init__(self, hparams, graph, col_spliter=\" \", ID_spliter=\"%\"):", "= tf.placeholder( # tf.int64, [None], name=\"dnn_feat_values\" # ) # self.dnn_feat_weights", "feature values. # Args: # line (str): a string indicating", "self.fm_feat_indices: data_dict[\"fm_feat_indices\"], # self.fm_feat_values: data_dict[\"fm_feat_values\"], # self.fm_feat_shape: data_dict[\"fm_feat_shape\"], # self.dnn_feat_indices:", "maps graph elements to numpy arrays. 
# \"\"\" # feed_dict", "[None], name=\"fm_feat_shape\") # self.dnn_feat_indices = tf.placeholder( # tf.int64, [None, 2],", "# self.fm_feat_values = tf.placeholder( # tf.float32, [None], name=\"fm_feat_values\" # )", "# where each feature array is a list of [field_idx,", "= [] # fm_feat_shape = [instance_cnt, dim] # dnn_feat_indices =", "features, impression_id = self.parser_one_line(line) # features_list.append(features) # label_list.append(label) # impression_id_list.append(impression_id)", "# if cnt == self.batch_size: # res = self._convert_data(label_list, features_list)", "= [instance_cnt, dim] # dnn_feat_indices = [] # dnn_feat_values =", "ID_spliter=\"%\"): # \"\"\"Initialize an iterator. Create necessary placeholders for the", "impression_id_list = [] # cnt = 0 # with tf.gfile.GFile(infile,", "cols[1:]: # if not word.strip(): # continue # tokens =", "whole data into memory. Instead, it loads data into memory", "parser_one_line(self, line): pass @abc.abstractmethod def load_data_from_file(self, infile): pass @abc.abstractmethod def", "res = {} # res[\"fm_feat_indices\"] = np.asarray(fm_feat_indices, dtype=np.int64) # res[\"fm_feat_values\"]", "{ # self.labels: data_dict[\"labels\"], # self.fm_feat_indices: data_dict[\"fm_feat_indices\"], # self.fm_feat_values: data_dict[\"fm_feat_values\"],", "[None, 2], name=\"dnn_feat_indices\" # ) # self.dnn_feat_values = tf.placeholder( #", "# dnn_feat_values.append(features[i][j][1]) # dnn_feat_weights.append(features[i][j][2]) # if dnn_feat_shape[1] < dnn_feat_dic[features[i][j][0]]: #", "# self.dnn_feat_indices: data_dict[\"dnn_feat_indices\"], # self.dnn_feat_values: data_dict[\"dnn_feat_values\"], # self.dnn_feat_weights: data_dict[\"dnn_feat_weights\"], #", "# \"\"\" # feed_dict = { # self.labels: data_dict[\"labels\"], #", "data into numpy arrays that are good for further operation.", "= self._convert_data(label_list, features_list) # yield self.gen_feed_dict(res), impression_id_list, self.batch_size # label_list", "0 # with tf.gfile.GFile(infile, \"r\") as rd: # for line", "graph elements to numpy arrays. # \"\"\" # feed_dict =", "# Returns: # dict: A dictionary, contains multiple numpy arrays", "it loads data into memory # per mini-batch, so that", "# ] # ) # dnn_feat_values.append(features[i][j][1]) # dnn_feat_weights.append(features[i][j][2]) # if", "(obj): the running graph. All created placeholder will be added", "3-dimensional list, carrying a list (batch_size) of feature array, #", "= tf.placeholder( # tf.float32, [None], name=\"fm_feat_values\" # ) # self.fm_feat_shape", "a list (batch_size) of feature array, # where each feature", "line): # \"\"\"Parse one string line into feature values. #", "a file. # Args: # infile (str): text input file.", "self.labels: data_dict[\"labels\"], # self.fm_feat_indices: data_dict[\"fm_feat_indices\"], # self.fm_feat_values: data_dict[\"fm_feat_values\"], # self.fm_feat_shape:", "infile): # \"\"\"Read and parse data from a file. #", "list of [field_idx, feature_idx, feature_value] tuple. # Returns: # dict:", "dtype=np.int64) # return res # def gen_feed_dict(self, data_dict): # \"\"\"Construct", "# data_dict (dict): a dictionary that maps string name to", "the model. # Args: # hparams (obj): Global hyper-parameters. Some", "multiple numpy arrays that are convenient for further operation. 
#", "# self.fm_feat_shape: data_dict[\"fm_feat_shape\"], # self.dnn_feat_indices: data_dict[\"dnn_feat_indices\"], # self.dnn_feat_values: data_dict[\"dnn_feat_values\"], #", "impression_id # \"\"\" # impression_id = 0 # words =", "placeholder will be added to this graph. # col_spliter (str):", "one line. # \"\"\" # self.feature_cnt = hparams.FEATURE_COUNT # self.field_cnt", "under the MIT License. import numpy as np # import", "FIELD_COUNT + features[i][j][0], # dnn_feat_dic[features[i][j][0]], # ] # ) #", "0 # words = line.strip().split(self.ID_spliter) # if len(words) == 2:", "# Licensed under the MIT License. import numpy as np", "tf.placeholder( # tf.int64, [None, 2], name=\"dnn_feat_indices\" # ) # self.dnn_feat_values", "# tf.int64, [None], name=\"dnn_feat_shape\" # ) # def parser_one_line(self, line):", "for word in cols[1:]: # if not word.strip(): # continue", "[] # impression_id_list = [] # cnt = 0 #", "string indicating one instance # Returns: # list: Parsed results,including", "there. # graph (obj): the running graph. All created placeholder", "file is an instance. # Returns: # obj: An iterator", "data_dict): pass # class FFMTextIterator(BaseIterator): # \"\"\"Data loader for FFM", "= [] # for word in cols[1:]: # if not", "] # res[\"dnn_feat_weights\"] = np.asarray(dnn_feat_weights, dtype=np.float32)[ # sorted_index # ]", "numpy arrays. # Returns: # dict: a dictionary that maps", "word.strip(): # continue # tokens = word.split(\":\") # features.append([int(tokens[0]) -", "# dnn_feat_indices = [] # dnn_feat_values = [] # dnn_feat_weights", "features, impression_id # def load_data_from_file(self, infile): # \"\"\"Read and parse", ") # res = {} # res[\"fm_feat_indices\"] = np.asarray(fm_feat_indices, dtype=np.int64)", "in labels], dtype=np.float32) # res[\"dnn_feat_indices\"] = np.asarray(dnn_feat_indices, dtype=np.int64)[ # sorted_index", "# col_spliter (str): column splitter in one line. # ID_spliter", "res[\"fm_feat_shape\"] = np.asarray(fm_feat_shape, dtype=np.int64) # res[\"labels\"] = np.asarray([[label] for label", "= np.asarray(dnn_feat_weights, dtype=np.float32)[ # sorted_index # ] # res[\"dnn_feat_shape\"] =", "line into feature values. # Args: # line (str): a", "# cnt = 0 # with tf.gfile.GFile(infile, \"r\") as rd:", "# line (str): a string indicating one instance # Returns:", "# res[\"labels\"] = np.asarray([[label] for label in labels], dtype=np.float32) #", "features): pass @abc.abstractmethod def gen_feed_dict(self, data_dict): pass # class FFMTextIterator(BaseIterator):", "\"\"\" # dim = self.feature_cnt # FIELD_COUNT = self.field_cnt #", "np.asarray(fm_feat_shape, dtype=np.int64) # res[\"labels\"] = np.asarray([[label] for label in labels],", "return res # def gen_feed_dict(self, data_dict): # \"\"\"Construct a dictionary", "self.fm_feat_shape: data_dict[\"fm_feat_shape\"], # self.dnn_feat_indices: data_dict[\"dnn_feat_indices\"], # self.dnn_feat_values: data_dict[\"dnn_feat_values\"], # self.dnn_feat_weights:", "this graph. # col_spliter (str): column splitter in one line.", "arrays that are convenient for further operation. 
# \"\"\" #", "as tf import abc class BaseIterator(object): @abc.abstractmethod def parser_one_line(self, line):", "Args: # line (str): a string indicating one instance #", "= np.asarray(fm_feat_indices, dtype=np.int64) # res[\"fm_feat_values\"] = np.asarray(fm_feat_values, dtype=np.float32) # res[\"fm_feat_shape\"]", "tf.placeholder( # tf.int64, [None], name=\"dnn_feat_values\" # ) # self.dnn_feat_weights =", "# def _convert_data(self, labels, features): # \"\"\"Convert data into numpy", "All rights reserved. # Licensed under the MIT License. import", "list (batch_size) of feature array, # where each feature array", "abc class BaseIterator(object): @abc.abstractmethod def parser_one_line(self, line): pass @abc.abstractmethod def", "= np.asarray(fm_feat_shape, dtype=np.int64) # res[\"labels\"] = np.asarray([[label] for label in", "= np.asarray(dnn_feat_shape, dtype=np.int64) # return res # def gen_feed_dict(self, data_dict):", "mini-batch, so that large files can be used as input", "col_spliter # self.ID_spliter = ID_spliter # self.batch_size = hparams.batch_size #", "sorted_index # ] # res[\"dnn_feat_shape\"] = np.asarray(dnn_feat_shape, dtype=np.int64) # return", "\"\"\" # label_list = [] # features_list = [] #", "features = [] # for word in cols[1:]: # if", "# self.dnn_feat_weights: data_dict[\"dnn_feat_weights\"], # self.dnn_feat_shape: data_dict[\"dnn_feat_shape\"], # } # return", "loads data into memory # per mini-batch, so that large", "impression_id = words[1].strip() # cols = words[0].strip().split(self.col_spliter) # label =", "# self.labels: data_dict[\"labels\"], # self.fm_feat_indices: data_dict[\"fm_feat_indices\"], # self.fm_feat_values: data_dict[\"fm_feat_values\"], #", "as #_feature and #_field are there. # graph (obj): the", "arrays. # Returns: # dict: a dictionary that maps graph", "# ID_spliter (str): ID splitter in one line. # \"\"\"", "= words[1].strip() # cols = words[0].strip().split(self.col_spliter) # label = float(cols[0])", "line.strip().split(self.ID_spliter) # if len(words) == 2: # impression_id = words[1].strip()" ]
def create_array(n):
    res = []
    i = 1
    while i <= n:
        res.append(i)
        i += 1
    return res
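# `create_array` above builds [1, 2, ..., n] with an explicit counter loop.
# An equivalent, more idiomatic spelling (our note, not part of the original
# file) uses `range` directly:
def create_array_idiomatic(n):
    return list(range(1, n + 1))

# e.g. create_array(4) == create_array_idiomatic(4) == [1, 2, 3, 4]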
<reponame>SDhuangao/netease-cloud-music-dl
# -*- coding: utf-8 -*-
import requests

from ncm.encrypt import encrypted_request
from ncm.constants import headers
from ncm.constants import song_download_url
from ncm.constants import get_song_url
from ncm.constants import get_album_url
from ncm.constants import get_artist_url
from ncm.constants import get_playlist_url


class CloudApi(object):

    def __init__(self, timeout=30):
        super().__init__()
        self.session = requests.session()
        self.session.headers.update(headers)
        self.timeout = timeout

    def get_request(self, url):
        response = self.session.get(url, timeout=self.timeout)
        result = response.json()
        if result['code'] != 200:
            print('Return {} when try to get {}'.format(result, url))
        else:
            return result

    def post_request(self, url, params):
        data = encrypted_request(params)
        response = self.session.post(url, data=data, timeout=self.timeout)
        result = response.json()
        if result['code'] != 200:
            print('Return {} when try to post {} => {}'.format(result, params, url))
        else:
            return result

    def get_song(self, song_id):
        """
        Get song info by song id
        :param song_id:
        :return:
        """
        url = get_song_url(song_id)
        result = self.get_request(url)
        return result['songs'][0]

    def get_album_songs(self, album_id):
        """
        Get all album songs info by album id
        :param album_id:
        :return:
        """
        url = get_album_url(album_id)
        result = self.get_request(url)
        return result['album']['songs']

    def get_song_url(self, song_id, bit_rate=320000):
        """
        Get a song's download url
        :params song_id: song id<int>.
        :params bit_rate: {'MD 128k': 128000, 'HD 320k': 320000}
        :return:
        """
        url = song_download_url
        csrf = ''
        params = {'ids': [song_id], 'br': bit_rate, 'csrf_token': csrf}
        result = self.post_request(url, params)
        song_url = result['data'][0]['url']
        return song_url

    def get_hot_songs(self, artist_id):
        """
        Get an artist's 50 hot songs
        :param artist_id:
        :return:
        """
        url = get_artist_url(artist_id)
        result = self.get_request(url)
        return result['hotSongs']

    def get_playlist_songs(self, playlist_id):
        """
        Get all songs of a public playlist
        :param playlist_id:
        :return:
        """
        url = get_playlist_url(playlist_id)
        result = self.get_request(url)
        return result['playlist']['trackIds'], result['playlist']['name']
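# A minimal usage sketch (our addition, not part of the original file). It
# assumes the `ncm` package and its `constants`/`encrypt` modules are
# importable and that the NetEase endpoints are reachable; the ids below are
# hypothetical placeholders.
if __name__ == '__main__':
    api = CloudApi(timeout=10)
    song = api.get_song(123456)              # hypothetical song id
    print(song['name'])
    download_url = api.get_song_url(123456)  # defaults to the 320k bit rate
    print(download_url)
    track_ids, playlist_name = api.get_playlist_songs(654321)  # hypothetical playlist id
    print(playlist_name, len(track_ids))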
"""
The `TreeNode` class provides many helper functions that make the work done
in the `BinarySearchTree` class methods much easier. The constructor for a
`TreeNode`, along with these helper functions, is shown below. As you can
see, many of these helper functions help to classify a node according to its
own position as a child (left or right) and the kind of children the node
has. The `TreeNode` class will also explicitly keep track of the parent as an
attribute of each node. You will see why this is important when we discuss
the implementation for the `del` operator.

One of the more interesting methods of `TreeNode` provides an interface to
simply iterate over all the keys in the tree in order, using the `inorder`
traversal algorithm. However, because we want our iterator to operate lazily,
in this case we use `yield` to define our `__iter__` method as a Python
generator. Pay careful attention to this method: at first glance you might
think that the code is not recursive, but in fact, because `__iter__`
overrides the `for x in` operation for iteration, it really is recursive!

Our full implementation of `TreeNode` is provided below. It includes three
further methods `find_successor`, `find_min` and `splice_out` which you can
ignore for now, as we will return to them later when discussing deletion.
"""

class TreeNode(object):
    def __init__(self, key, val, left=None, right=None, parent=None):
        self.key = key
        self.val = val
        self.left = left
        self.right = right
        self.parent = parent

    def is_left_child(self):
        return self.parent and self.parent.left == self

    def is_right_child(self):
        return self.parent and self.parent.right == self

    def is_leaf(self):
        return not (self.right or self.left)

    def has_any_children(self):
        return self.right or self.left

    def has_both_children(self):
        return self.right and self.left

    def has_one_child(self):
        return self.has_any_children() and not self.has_both_children()

    def replace_node_data(self, key, val, left, right):
        self.key = key
        self.val = val
        self.left = left
        self.right = right
        if self.left:
            self.left.parent = self
        if self.right:
            self.right.parent = self

    def __iter__(self):
        if self is None:
            return
        if self.left:
            # `in` calls `__iter__` so is recursive
            for elem in self.left:
                yield elem
        yield self.key
        if self.right:
            # recurse again
            for elem in self.right:
                yield elem

    def find_successor(self):
        if self.right:
            return self.right.find_min()
        if self.parent is None:
            return None
        if self.is_left_child():
            return self.parent
        self.parent.right = None
        successor = self.parent.find_successor()
        self.parent.right = self
        return successor

    def find_min(self):
        current = self
        while current.left:
            current = current.left
        return current

    def splice_out(self):
        if self.is_leaf():
            if self.is_left_child():
                self.parent.left = None
            else:
                self.parent.right = None
        else:
            promoted_node = self.left or self.right
            if self.is_left_child():
                self.parent.left = promoted_node
            else:
                self.parent.right = promoted_node
            promoted_node.parent = self.parent
\"\"\" class BinarySearchTree(object): TreeNodeClass = TreeNode", "why this is important when we discuss the implementation for", "found, the value stored in the val of the node", "for deletion. What we need is a node that will", "a right child then we only need to update the", "call `delete` recursively, but then we would waste time re-searching", "= parent def is_left_child(self): return self.parent and self.parent.left == self", "children](figures/binary-search-tree-delete-3.png) The code to handle the third case is shown", "data by calling the `replace_node_data` method on the root. Code", "search tree is the leftmost child of the tree. Therefore", "2. The node to be deleted has only one child", "to have no more than one child, so we know", "if node == node.parent.left: node.parent.left = None else: node.parent.right =", "'foo'` style assignment interface for our `BinarySearchTree` instances, we override", "child reference of the parent to point to the current", "at first glance you might think that the code is", "self.left or self.right if self.is_left_child(): self.parent.left = promoted_node else: self.parent.right", "but then we would waste time re-searching for the key", "a method of the `TreeNode` class. This code makes use", "for this case is shown in the next code sample.", "node): if not node: return None if node.key == key:", "that we must consider: 1. The node to be deleted", "![Deleting node 5, a node with two children](figures/binary-search-tree-delete-3.png) The code", "have our `TreeNode` class we can begin to write `BinarySearchTree`", "val, node.left) else: node.left = self.TreeNodeClass(key, val, parent=node) else: if", "`_put` to search the tree according to the following algorithm:", "key. The `get` functionality is even easier than the `put`", "is that duplicate keys are not handled properly. As our", "we get to a non-matching leaf node or find a", "used as a flexible helper method for other `BinarySearchTree` methods", "update the parent reference of the left child to point", "task is to find the node to delete by searching", "return None if node.key == key: return node if key", "over all the keys in the tree in order. You", "parent to point to the current node’s right child. 3.", "TreeNode(object): def __init__(self, key, val, left=None, right=None, parent=None): self.key =", "references in each node of the subtree until it reaches", "a `TreeNode`, along with these helper functions, is shown below.", "the current node has no children all we need to", "search tree relationships for both of the existing left and", "of a root node, and delegates the core `get` functionality", "has no right child, then the successor to this node", "= right self.parent = parent def is_left_child(self): return self.parent and", "helper function `_put` to search the tree according to the", "16, a node without children](figures/binary-search-tree-delete-1.png) The second case is only", "code makes use of the same properties of binary search", "the original key. The result of this is that the", "you can ignore for now as we will return to", "the node’s place. We can, however, search the tree for", "right subtree of the node having the original key. The", "raises an error. \"\"\" def delete(self, key): if self.size >", "current = self while current.left: current = current.left return current", "the minimum valued key in any binary search tree is", "is None: return if self.left: # `in` calls `__iter__` so", "given key. The `get` functionality is even easier than the", "right subtrees. 
"""
The figure below shows the tree during the insertion process.

![Inserting a node with key = 19](figures/binary-search-tree-put.png)

Once the tree is constructed, the next task is to implement the retrieval of
a value for a given key. The `get` functionality is even easier than the
`put` functionality because we simply search the tree recursively until we
get to a non-matching leaf node or find a matching key. When a matching key
is found, the value stored in the val of the node is returned.

Just like with `__setitem__`, the primary purpose of `__getitem__` is to
enable the `tree[key]` style retrieval interface. The `_get` method uses the
same logic for choosing the left or right child as the `_put` method. Notice
that `_get` returns a `TreeNode` to `__getitem__`; this allows `_get` to be
used as a flexible helper method for other `BinarySearchTree` methods that
may need to make use of other data from the `TreeNode` besides the payload.
"""

def __getitem__(self, key):
    result = self._get(key, self.root)
    if result:
        return result.val
    raise KeyError

def _get(self, key, node):
    if not node:
        return None
    if node.key == key:
        return node
    if key < node.key:
        return self._get(key, node.left)
    else:
        return self._get(key, node.right)

"""
Using `_get`, we can implement the `in` operation by writing a `__contains__`
method for the `BinarySearchTree`. The `__contains__` method will simply call
`_get` and return `True` if `_get` returns a value, or `False` if it returns
`None`.
"""

def __contains__(self, key):
    return bool(self._get(key, self.root))
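"""
A short usage sketch of the interface so far (our addition; the keys and
values are hypothetical, and it assumes the methods above are collected into
the `BinarySearchTree` class):
"""

# tree = BinarySearchTree()
# tree[17] = "seventeen"
# tree[5] = "five"
# tree[35] = "thirty-five"
# tree[17]       # -> 'seventeen'
# 5 in tree      # -> True
# 99 in tree     # -> False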
The `TreeNode` class will also explicitly keep track", "tree is constructed, the next task is to implement the", "the key that is to be deleted. In either case", "to make use of other data from the `TreeNode` besides", "check to see if the tree already has a root.", "children](figures/binary-search-tree-delete-1.png) The second case is only slightly more complicated (see", "that the code is not recursive: in fact, because `__iter__`", "child to take the place of its parent. The code", "to find the successor is shown above and as you", "left child. The decision proceeds as follows: 1. If the", "In this case we will just replace the `key`, `val`,", "replace the one scheduled for deletion. What we need is", "a matching key. When a matching key is found, the", "we must consider: 1. The node to be deleted has", "= None else: self.parent.right = None else: promoted_node = self.left", "that we make use of the helper methods `find_successor` and", "in a subtree. You should convince yourself that the minimum", "the successor. To remove the successor, we make use of", "key self.val = val self.left = left self.right = right", "None else: self.parent.right = None else: promoted_node = self.left or", "the root. In this case we will just replace the", "node. If the new key is less than the current", "to define our `__iter__` method as a Python generator. Pay", "insert is that duplicate keys are not handled properly. As", "method simply follows the `left` references in each node of", "parent. The code for this case is shown in the", "we simply put it in the tree in place of", "really is recursive! Our full implementation of `TreeNode` is provided", "self.parent and self.parent.right == self def is_leaf(self): return not (self.right", "been removed, we simply put it in the tree in", "`get`ing from the tree, so we begin our implementation with", "the leftmost child of the tree. Therefore the `find_min` method", "`__contains__` method will simply call `_get` and return `True` if", "writing a `__contains__` method for the `BinarySearchTree`. The `__contains__` method", "node we want to splice out and makes the right", "self.TreeNodeClass(key, val) self.size = self.size + 1 def _put(self, key,", "itself has no right child, then the successor to this", "a value for a given key. The `get` functionality is", "the `del` operator. One of the more interesting methods of", "is straightforward. If the current node has no children all", "the right changes. We could call `delete` recursively, but then", "in place then `put` calls the private, recursive, helper function", "`find_successor` and `find_min` to find the successor. To remove the", "and the kind of children the node has. The `TreeNode`", "this method we first check to see if the tree", "child. 3. If the current node has no parent, it", "will just replace the `key`, `val`, `left`, and `right` data", "root of the tree, search the binary tree comparing the", "and remove the reference to this node in the parent.", "the two cases for deletion that we have already implemented.", "a root node is already in place then `put` calls", "step. The code below shows the Python code for inserting", "if key < node.key: if node.left: self._put(key, val, node.left) else:", "a node from a binary search tree. The `find_min` method", "above. Notice that when a new child is inserted into", "to handle (see below). If a node has two children,", "for you. 
\"\"\" class BinarySearchTree(object): TreeNodeClass = TreeNode def __init__(self):", "the position in the tree where the new node should", "\"\"\" def __contains__(self, key): return bool(self._get(key, self.root)) \"\"\" Finally, we", "method returns a `TreeNode` to `__getitem__`, this allows `_get` to", "key is found, the value stored in the val of", "consider: 1. The node to be deleted has no children", "`tree[1] = 'foo'` style assignment interface for our `BinarySearchTree` instances,", "that has a single child](figures/binary-search-tree-delete-2.png) The third case is the", "provided below. It includes three further methods `find_successor`, `find_min` and", "then `put` calls the private, recursive, helper function `_put` to", "to consider. Since the cases are symmetric with respect to", "self.size - 1 return raise KeyError('Error, key not in tree')", "recursively following the steps outlined above. Notice that when a", "node is a left child then we only need to", "itself. Recall that the core functionality of this class will", "key that is to be deleted. In either case if", "As you look at this code you will see that", "node. The first condition is the only one that matters", "key will create a new node with the same key", "`TreeNode` class provides many helper functions that make the work", "None: return None if self.is_left_child(): return self.parent self.parent.right = None", "The `get` functionality is even easier than the `put` functionality", "that make the work done in the `BinarySearchTree` class methods", "during the insertion process. ![Inserting a node with key =", "self.root: self._put(key, val, self.root) else: self.root = self.TreeNodeClass(key, val) self.size", "return self.right or self.left def has_both_children(self): return self.right and self.left", "for a node that can be used to replace the", "first case is straightforward. If the current node has no", "and self.left def has_one_child(self): return self.has_any_children() and not self.has_both_children() def", "`BinarySearchTree` methods that may need to make use of other", "What we need is a node that will preserve the", "we discuss the implementation for the `del` operator. One of", "= self.size - 1 return raise KeyError('Error, key not in", "for the key node. \"\"\" else: # has both children", "new key to replace the old value. We leave fixing", "existing left and right subtrees. The node that will do", "third case is the most difficult case to handle (see", "is shown below. Notice that we make use of the", "because `__iter__` overrides the `for x in` operation for iteration,", "out and makes the right changes. We could call `delete`", "to delete by searching the tree. If the tree has", "tree, create a new `TreeNode` object and insert the object", "next code sample. As you look at this code you", "will return to them later when discussing deletion. \"\"\" class", "= 19](figures/binary-search-tree-put.png) Once the tree is constructed, the next task", "the smallest key in the right subtree. 2. If the", "outlined above. Notice that when a new child is inserted", "are three cases that we must consider: 1. The node", "self.key if self.right: # recurse again for elem in self.right:", "successor = self.parent.find_successor() self.parent.right = self return successor def find_min(self):", "of the current node, and then update the right child", "write `BinarySearchTree` itself. Recall that the core functionality of this", "remove the reference to this node in the parent. 
The", "bool(self._get(key, self.root)) \"\"\" Finally, we turn our attention to the", "implementation as at first glance you might think that the", "else: promoted_node = self.left or self.right if self.is_left_child(): self.parent.left =", "a node to the tree, create a new `TreeNode` object", "be removed. If the tree only has a single node,", "method we first check to see if the tree already", "to the node we want to splice out and makes", "self.size = 0 def __len__(self): return self.size def __iter__(self): return", "x in` operation for iteration, it really is recursive! Our", "promoted_node else: self.parent.right = promoted_node promoted_node.parent = self.parent \"\"\" Now", "minimum valued key in any binary search tree is the", "self.parent = parent def is_left_child(self): return self.parent and self.parent.left ==", "subtree until it reaches a node that does not have", "generator. Pay close attention to the `__iter__` implementation as at", "deleted. In either case if the key is not found", "right child then we only need to update the parent", "case to handle (see below). If a node has two", "None else: promoted_node = self.left or self.right if self.is_left_child(): self.parent.left", "node.parent is not None: if node == node.parent.left: node.parent.left =", "= promoted_node elif node.is_right_child(): promoted_node.parent = node.parent node.parent.right = promoted_node", "if self.size > 1: node_to_remove = self._get(key, self.root) if node_to_remove:", "or self.left) def has_any_children(self): return self.right or self.left def has_both_children(self):", "splice_out(self): if self.is_leaf(): if self.is_left_child(): self.parent.left = None else: self.parent.right", "can be used to replace the one scheduled for deletion.", "original key. The result of this is that the node", "matching key is found, the value stored in the val", "self.size > 1: node_to_remove = self._get(key, self.root) if node_to_remove: self.remove(node_to_remove)", "the right child of its parent, and itself has no", "know how to remove it using the two cases for", "the right child reference of the parent to point to", "self.val = val self.left = left self.right = right self.parent", "key, val, left=None, right=None, parent=None): self.key = key self.val =", "`delete` recursively, but then we would waste time re-searching for", "self.right if self.is_left_child(): self.parent.left = promoted_node else: self.parent.right = promoted_node", "right child. 3. If the current node has no parent,", "is greater than the current node, search the right subtree.", "of the tree. Therefore the `find_min` method simply follows the", "\"\"\" class BinarySearchTree(object): TreeNodeClass = TreeNode def __init__(self): self.root =", "needs to be removed. If the tree only has a", "right child and is the left child of its parent,", "search the left subtree. If the new key is greater", "be the root. In this case we will just replace", "`right` data by calling the `replace_node_data` method on the root.", "interface to simply iterate over all the keys in the", "parent of the current node, and then update the right", "to find the minimum key in a subtree. You should", "have already implemented. 
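"""
As a quick illustration of the lazy, recursive generator, we can build a
tiny tree by hand and walk it. This is a toy example of our own (the keys
and values are invented), using only the `TreeNode` class above:
"""

# Build the three-node tree  2 <- 5 -> 9  by hand.
root = TreeNode(5, 'five')
root.left = TreeNode(2, 'two', parent=root)
root.right = TreeNode(9, 'nine', parent=root)

# `list` drives `__iter__`, which yields the keys lazily in sorted order.
print(list(root))  # [2, 5, 9]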
"""
Now that we have our `TreeNode` class we can begin to write
`BinarySearchTree` itself. Recall that the core functionality of this class
will be to enable `put`ing to and `get`ing from the tree, so we begin our
implementation with the `put` functionality. In order to enable the
`tree[1] = 'foo'` style assignment interface for our `BinarySearchTree`
instances, we override the `__setitem__` magic method. In this method we
first check to see if the tree already has a root. If there is not a root
then we create a new `TreeNode` and set it as the root of the tree. If a
root node is already in place then `put` calls the private, recursive
helper function `_put` to search the tree according to the following
algorithm:

- Starting at the root of the tree, search the binary tree comparing the
  new key to the key in the current node. If the new key is less than the
  current node, search the left subtree. If the new key is greater than the
  current node, search the right subtree.
- When there is no left (or right) child to search, we have found the
  position in the tree where the new node should be installed.
- To add a node to the tree, create a new `TreeNode` object and insert the
  object at the point discovered in the previous step.

The code below shows the Python code for inserting a new node in the tree.
The `_put` function is written recursively, following the steps outlined
above. Notice that when a new child is inserted into the tree, the `node`
is passed to the new tree as the parent.

One important problem with our implementation of insert is that duplicate
keys are not handled properly. As our tree is implemented, a duplicate key
will create a new node with the same key value in the right subtree of the
node having the original key. The result of this is that the node with the
new key will never be found during a search. A better way to handle the
insertion of a duplicate key is for the value associated with the new key
to replace the old value. We leave fixing this bug as an exercise for you;
one possible approach is sketched after the class below.
"""

class BinarySearchTree(object):

    TreeNodeClass = TreeNode

    def __init__(self):
        self.root = None
        self.size = 0

    def __len__(self):
        return self.size

    def __iter__(self):
        return self.root.__iter__()

    def __setitem__(self, key, val):
        if self.root:
            self._put(key, val, self.root)
        else:
            self.root = self.TreeNodeClass(key, val)
        self.size = self.size + 1

    def _put(self, key, val, node):
        if key < node.key:
            if node.left:
                self._put(key, val, node.left)
            else:
                node.left = self.TreeNodeClass(key, val, parent=node)
        else:
            if node.right:
                self._put(key, val, node.right)
            else:
                node.right = self.TreeNodeClass(key, val, parent=node)
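"""
The duplicate-key bug above admits a small fix. Here is one possible
sketch, written as a hypothetical subclass (the name `BinarySearchTreeNoDup`
and the boolean-return convention are our own choices, not something the
text prescribes): when the new key matches an existing node we overwrite
the stored value, and `size` is only incremented when a genuinely new node
is created.
"""

class BinarySearchTreeNoDup(BinarySearchTree):

    def __setitem__(self, key, val):
        if self.root:
            created = self._put(key, val, self.root)
        else:
            self.root = self.TreeNodeClass(key, val)
            created = True
        if created:
            self.size = self.size + 1

    def _put(self, key, val, node):
        # On a duplicate key, replace the value in place and report that
        # no new node was created, so `__setitem__` leaves `size` alone.
        if key == node.key:
            node.val = val
            return False
        if key < node.key:
            if node.left:
                return self._put(key, val, node.left)
            node.left = self.TreeNodeClass(key, val, parent=node)
            return True
        if node.right:
            return self._put(key, val, node.right)
        node.right = self.TreeNodeClass(key, val, parent=node)
        return True

"""
With this variant, assigning to the same key twice updates the value and
leaves the tree's size unchanged.
"""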
"""
The diagram below illustrates the process for inserting a new node into a
binary search tree. The lightly shaded nodes indicate the nodes that were
visited during the insertion process.

![Inserting a node with key = 19](figures/binary-search-tree-put.png)

Once the tree is constructed, the next task is to implement the retrieval
of a value for a given key. The `get` functionality is even easier than the
`put` functionality because we simply search the tree recursively until we
get to a non-matching leaf node or find a matching key. When a matching key
is found, the value stored in the `val` attribute of the node is returned.

Again, in order to enable a `tree[1]` retrieval interface, we overload one
of Python's magic methods, in this case `__getitem__`. Just like with
`__setitem__`, the primary purpose of this method is to handle the presence
and absence of a root node, and it delegates the core `get` functionality
to `_get`. The search code in the `_get` method uses the same logic for
choosing the left or right child as the `_put` method. Notice that `_get`
returns a `TreeNode` to `__getitem__`; this allows `_get` to be used as a
flexible helper for other `BinarySearchTree` methods that may need to make
use of other data from the `TreeNode` besides the `val`.
"""

def __getitem__(self, key):
    if self.root:
        result = self._get(key, self.root)
        if result:
            return result.val
    raise KeyError

def _get(self, key, node):
    if not node:
        return None
    if node.key == key:
        return node
    if key < node.key:
        return self._get(key, node.left)
    return self._get(key, node.right)

"""
Using `_get`, we can implement the `in` operation by writing a
`__contains__` method for the `BinarySearchTree`. The `__contains__` method
will simply call `_get` and return `True` if `_get` returns a value, or
`False` if it returns `None`. The code for `__contains__` is shown below.
"""

def __contains__(self, key):
    return bool(self._get(key, self.root))
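"""
Putting the pieces together, here is a small usage sketch. The keys and
values are our own examples, and it assumes the method fragments above
(`__getitem__`, `_get` and `__contains__`) have been assembled into the
`BinarySearchTree` class:
"""

tree = BinarySearchTree()
tree[17] = 'seventeen'    # __setitem__ creates the root
tree[5] = 'five'          # smaller keys go to the left subtree
tree[35] = 'thirty-five'  # larger keys go to the right subtree

print(tree[5])      # __getitem__  -> 'five'
print(35 in tree)   # __contains__ -> True
print(len(tree))    # __len__      -> 3
print(list(tree))   # __iter__ yields keys in order: [5, 17, 35]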
"""
Finally, we turn our attention to the most challenging method in the binary
search tree: the deletion of a key. The first task is to find the node to
delete by searching the tree. If the tree has more than one node we search
using the `_get` method to find the `TreeNode` that needs to be removed. If
the tree only has a single node, that means we are removing the root of the
tree, but we still must check to make sure the key of the root matches the
key that is to be deleted. In either case, if the key is not found, the
`del` operator raises a `KeyError`.
"""

def delete(self, key):
    if self.size > 1:
        node_to_remove = self._get(key, self.root)
        if node_to_remove:
            self.remove(node_to_remove)
            self.size = self.size - 1
            return
    elif self.size == 1 and self.root.key == key:
        self.root = None
        self.size = self.size - 1
        return
    raise KeyError('Error, key not in tree')

def __delitem__(self, key):
    self.delete(key)

"""
Once we've found the node containing the key we want to delete, there are
three cases that we must consider:

1. The node to be deleted has no children
2. The node to be deleted has only one child
3. The node to be deleted has two children

The first case is straightforward. If the current node has no children, all
we need to do is delete the node and remove the reference to this node in
the parent. The code for this case is shown below.
"""

def remove(self, node):
    if node.is_leaf() and node.parent is not None:
        if node == node.parent.left:
            node.parent.left = None
        else:
            node.parent.right = None
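"""
A minimal sanity check of the leaf case, again with our own example keys
and under the same assumption that `delete`, `__delitem__` and `remove`
above are assembled into the class:
"""

tree = BinarySearchTree()
for key in [17, 5, 35]:
    tree[key] = str(key)

del tree[35]        # 35 is a leaf, so case 1 applies
print(35 in tree)   # False
print(list(tree))   # [5, 17]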
Code for this decision process may look like:
"""

        elif node.has_one_child():
            promoted_node = node.left or node.right

            if node.is_left_child():
                promoted_node.parent = node.parent
                node.parent.left = promoted_node
            elif node.is_right_child():
                promoted_node.parent = node.parent
                node.parent.right = promoted_node
            else:
                node.replace_node_data(
                    promoted_node.key,
                    promoted_node.val,
                    promoted_node.left,
                    promoted_node.right
                )

"""
![Deleting node 25, a node that has a single child](figures/binary-search-tree-delete-2.png)
The third case is the most difficult case to handle (see below). If a node has two children, then it is unlikely that we can simply promote one of them to take the node’s place. We can, however, search the tree for a node that can be used to replace the one scheduled for deletion. What we need is a node that will preserve the binary search tree relationships for both of the existing left and right subtrees. The node that will do this is the node that has the next-largest key in the tree. We call this node the **successor**, and we will look at a way to find the successor shortly. The successor is guaranteed to have no more than one child, so we know how to remove it using the two cases for deletion that we have already implemented. Once the successor has been removed, we simply put it in the tree in place of the node to be deleted.

![Deleting node 5, a node with two children](figures/binary-search-tree-delete-3.png)

The code to handle the third case is shown below. Notice that we make use of the helper methods `find_successor` and `find_min` to find the successor. To remove the successor, we make use of the method `splice_out`. The reason we use `splice_out` is that it goes directly to the node we want to splice out and makes the right changes. We could call `delete` recursively, but then we would waste time re-searching for the key node.
"""

        else:  # has both children
            successor = node.find_successor()
            if successor:
                successor.splice_out()
                node.key = successor.key
                node.val = successor.val
"""
The code to find the successor is shown above and, as you can see, it is a method of the `TreeNode` class. This code makes use of the same properties of binary search trees that cause an inorder traversal to print out the nodes in the tree from smallest to largest. There are three cases to consider when looking for the successor:

1. If the node has a right child, then the successor is the smallest key in the right subtree.
2. If the node has no right child and is the left child of its parent, then the parent is the successor.
3. If the node is the right child of its parent, and itself has no right child, then the successor to this node is the successor of its parent, excluding this node.

The first condition is the only one that matters for us when deleting a node from a binary search tree.

The `find_min` method is called to find the minimum key in a subtree. You should convince yourself that the minimum valued key in any binary search tree is the leftmost child of the tree. Therefore the `find_min` method simply follows the `left` references in each node of the subtree until it reaches a node that does not have a left child.
"""
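"""
With the three branches of `remove` above assembled into one method, a quick sanity check can exercise each deletion case. The snippet below is a minimal sketch, assuming the `BinarySearchTree` and `TreeNode` classes from this section are in scope; the key values are chosen so that 16 is a leaf, 25 has a single child, and 5 has two children, loosely following the figures above.
"""

tree = BinarySearchTree()
for key in [17, 5, 25, 2, 11, 35, 29, 38, 9, 16, 7, 8]:
    tree[key] = str(key)

del tree[16]  # case 1: a leaf is simply unlinked from its parent (11)
del tree[25]  # case 2: its only child (35) is promoted into its place
del tree[5]   # case 3: the successor (7) is spliced out and replaces 5

print(16 in tree, 25 in tree, 5 in tree)  # False False False
print([key for key in tree])  # [2, 7, 8, 9, 11, 17, 29, 35, 38]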
In", "however, search the tree for a node that can be", "`yield` keyword to define our `__iter__` method as a Python", "we can implement the `in` operation by writing a `__contains__`", "method. Notice that the `_get` method returns a `TreeNode` to", "operation by writing a `__contains__` method for the `BinarySearchTree`. The", "just discuss the case where the current node has a", "like: \"\"\" elif node.has_one_child(): promoted_node = node.left or node.right if", "these helper functions, is shown below. As you can see,", "to point to the current node’s right child. 3. If", "re-searching for the key node. \"\"\" else: # has both", "as follows: 1. If the current node is a left", "of a duplicate key is for the value associated with", "right child we will just discuss the case where the", "the parent reference of the right child to point to", "the deletion of a key. The first task is to", "find_successor(self): if self.right: return self.right.find_min() if self.parent is None: return", "of the helper methods `find_successor` and `find_min` to find the", "keys are not handled properly. As our tree is implemented", "def __getitem__(self, key): if self.root: result = self._get(key, self.root) if", "self.right: yield elem def find_successor(self): if self.right: return self.right.find_min() if", "def delete(self, key): if self.size > 1: node_to_remove = self._get(key,", "returned. Again, inorder to enable a `tree[1]` retrieval interface, we", "to operate lazily, in this case we use the `yield`", "If a node has two children, then it is unlikely", "node.right: self._put(key, val, node.right) else: node.right = self.TreeNodeClass(key, val, parent=node)", "`__iter__` overrides the `for x in` operation for iteration, it", "from the `TreeNode` besides the val. \"\"\" def __getitem__(self, key):", "has_one_child(self): return self.has_any_children() and not self.has_both_children() def replace_node_data(self, key, val,", "want to delete, there are three cases that we must", "in order, using the `inorder` traversal algorithm. However, because we", "to find the `TreeNode` that needs to be removed. If", "we will look at a way to find the successor", "left or right child we will just discuss the case", "of the tree. If a root node is already in", "return self.parent and self.parent.right == self def is_leaf(self): return not", "self.root)) \"\"\" Finally, we turn our attention to the most", "return node if key < node.key: return self._get(key, node.left) return", "parent=None): self.key = key self.val = val self.left = left", "as the parent. One important problem with our implementation of", "search trees that cause an inorder traversal to print out", "need to do is delete the node and remove the", "for this decision process may look like: \"\"\" elif node.has_one_child():", "implement the retrieval of a value for a given key.", "value in the right subtree of the node having the", "be deleted has no children 2. The node to be", "the right subtree. - When there is no left (or", "`TreeNode` and set it as the root of the tree.", "The node to be deleted has no children 2. The", "= TreeNode def __init__(self): self.root = None self.size = 0", "find the successor shortly. The successor is guaranteed to have", "methods—in this case `__getitem__`. Just like with `__setitem__`, the primary", "`val`, `left`, and `right` data by calling the `replace_node_data` method", "the `_get` method returns a `TreeNode` to `__getitem__`, this allows", "we can simply promote the child to take the place", "of each node. 
You will see why this is important", "binary search tree. The `find_min` method is called to find", "self return successor def find_min(self): current = self while current.left:", "of its parent, then the parent is the successor. 3.", "has_any_children(self): return self.right or self.left def has_both_children(self): return self.right and", "or right child we will just discuss the case where", "node.has_one_child(): promoted_node = node.left or node.right if node.is_left_child(): promoted_node.parent =", "`None`. The code for `__contains__` is shown below. \"\"\" def", "according to its own position as a child, (left or", "not (self.right or self.left) def has_any_children(self): return self.right or self.left", "single child, then we can simply promote the child to", "The code for `__contains__` is shown below. \"\"\" def __contains__(self,", "tree relationships for both of the existing left and right", "with these helper functions, is shown below. As you can", "= val self.left = left self.right = right self.parent =", "You should convince yourself that the minimum valued key in", "found during a search. A better way to handle the", "create a new `TreeNode` and set it as the root", "tree as the parent. One important problem with our implementation", "node.left) else: node.left = self.TreeNodeClass(key, val, parent=node) else: if node.right:", "this case is shown in the next code sample. As", "to the most challenging method in the binary search tree:", "- Starting at the root of the tree, search the", "case is shown below. \"\"\" def remove(self, node): if node.is_leaf()", "using the `inorder` traversal algorithm. However, because we want our", "promoted_node.right ) \"\"\" ![Deleting node 25, a node that has", "left=None, right=None, parent=None): self.key = key self.val = val self.left", "== self def is_right_child(self): return self.parent and self.parent.right == self", "the `TreeNode` besides the val. \"\"\" def __getitem__(self, key): if", "node to be deleted. ![Deleting node 5, a node with", "we first check to see if the tree already has", "the current node, search the right subtree. - When there", "that will preserve the binary search tree relationships for both", "the current node is a right child then we only", "use of the helper methods `find_successor` and `find_min` to find", "is not recursive: in fact, because `__iter__` overrides the `for", "easier than the `put` functionality because we simply search the", "node has no children all we need to do is", "to find the node to delete by searching the tree.", "has more than one node we search using the `_get`", "The reason we use `splice_out` is that it goes directly", "= self.TreeNodeClass(key, val, parent=node) \"\"\" The diagram below illustrates the", "of the parent as an attribute of each node. You", "self._put(key, val, node.left) else: node.left = self.TreeNodeClass(key, val, parent=node) else:", "is less than the current node, search the left subtree.", "__setitem__(self, key, val): if self.root: self._put(key, val, self.root) else: self.root", "removed, we simply put it in the tree in place", "that may need to make use of other data from", "to be deleted has only one child 3. The node", "the only one that matters for us when deleting a", "root node is already in place then `put` calls the", "< node.key: if node.left: self._put(key, val, node.left) else: node.left =", "one of them to take the node’s place. We can,", "only need to update the parent reference of the right", "recursive! 
Our full implementation of `TreeNode` is provided below. It", "create a new `TreeNode` object and insert the object at", "its parent, excluding this node. The first condition is the", "functionality because we simply search the tree recursively until we", "need to update the parent reference of the left child", "be deleted. In either case if the key is not", "helper functions that make the work done in the `BinarySearchTree`", "to a non-matching leaf node or find a matching key.", "use of the method `splice_out`. The reason we use `splice_out`", "position as a child, (left or right) and the kind", "or self.left def has_both_children(self): return self.right and self.left def has_one_child(self):", "it returns `None`. The code for `__contains__` is shown below.", "complicated (see below). If a node has only a single", "the current node’s left child. 2. If the current node", "return self.root.__iter__() def __setitem__(self, key, val): if self.root: self._put(key, val,", "parent of the current node, and then update the left", "new child is inserted into the tree, the `node` is", "and self.parent.left == self def is_right_child(self): return self.parent and self.parent.right", "child. 2. If the current node is a right child", "a duplicate key is for the value associated with the", "glance you might think that the code is not recursive:", "only a single child, then we can simply promote the", "a `TreeNode` to `__getitem__`, this allows `_get` to be used", "our `TreeNode` class we can begin to write `BinarySearchTree` itself.", "`left` references in each node of the subtree until it", "key, node): if not node: return None if node.key ==", "self.TreeNodeClass(key, val, parent=node) \"\"\" The diagram below illustrates the process", "_get(self, key, node): if not node: return None if node.key", "respect to either having a left or right child we", "`del` operator. One of the more interesting methods of `TreeNode`", "node.right) else: node.right = self.TreeNodeClass(key, val, parent=node) \"\"\" The diagram", "There are three cases to consider when looking for the", "that needs to be removed. If the tree only has", "right subtree. 2. If the node has no right child", "= promoted_node promoted_node.parent = self.parent \"\"\" Now that we have", "== key: self.root = None self.size = self.size - 1", "\"\"\" class TreeNode(object): def __init__(self, key, val, left=None, right=None, parent=None):", "node has a right child, then the successor is the", "to be deleted. ![Deleting node 5, a node with two", "self.left: # `in` calls `__iter__` so is recursive for elem", "search using the `_get` method to find the `TreeNode` that", "point to the current node’s right child. 3. If the", "this code you will see that there are six cases", "is recursive! Our full implementation of `TreeNode` is provided below.", "Our full implementation of `TreeNode` is provided below. It includes", "a root then we create a new `TreeNode` and set", "the node that has the next-largest key in the tree.", "of the root matches the key that is to be", "one scheduled for deletion. What we need is a node", "no children 2. The node to be deleted has only", "is to be deleted. In either case if the key", "parent reference of the right child to point to the", "left child reference of the parent to point to the", "tree. The lightly shaded nodes indicate the nodes that were", "the kind of children the node has. 
The `TreeNode` class", "look like: \"\"\" elif node.has_one_child(): promoted_node = node.left or node.right", "to search, we have found the position in the tree", "we still must check to make sure the key of", "< node.key: return self._get(key, node.left) return self._get(key, node.right) \"\"\" Using", "If the tree only has a single node, that means", "is called to find the minimum key in a subtree.", "can simply promote the child to take the place of", "node that has a single child](figures/binary-search-tree-delete-2.png) The third case is", "found the `del` operator raises an error. \"\"\" def delete(self,", "for the successor: 1. If the node has a right", "self._put(key, val, self.root) else: self.root = self.TreeNodeClass(key, val) self.size =", "node): if node.is_leaf() and node.parent is not None: if node", "children the node has. The `TreeNode` class will also explicitly", "we turn our attention to the most challenging method in", "find a matching key. When a matching key is found,", "Pay close attention to the `__iter__` implementation as at first", "the `_put` method. Notice that the `_get` method returns a", "`__contains__` is shown below. \"\"\" def __contains__(self, key): return bool(self._get(key,", "this node is the successor of its parent, excluding this", "node): if key < node.key: if node.left: self._put(key, val, node.left)", "result.val raise KeyError def _get(self, key, node): if not node:", "code for `__contains__` is shown below. \"\"\" def __contains__(self, key):", "return raise KeyError('Error, key not in tree') def __delitem__(self, key):", "this class will be to enable `put`ing to and `get`ing", "make use of the helper methods `find_successor` and `find_min` to", "that means we are removing the root of the tree,", "that the node with the new key will never be", "key: return node if key < node.key: return self._get(key, node.left)", "the Python code for inserting a new node in the", "def find_min(self): current = self while current.left: current = current.left", "there is not a root then we create a new", "= None \"\"\" ![Deleting Node 16, a node without children](figures/binary-search-tree-delete-1.png)", "magic method. In this method we first check to see", "node with the same key value in the right subtree", "new node into a binary search tree. The lightly shaded", "node to delete by searching the tree. If the tree", "= left self.right = right self.parent = parent def is_left_child(self):", "we will return to them later when discussing deletion. \"\"\"", "= promoted_node else: self.parent.right = promoted_node promoted_node.parent = self.parent \"\"\"", "def splice_out(self): if self.is_leaf(): if self.is_left_child(): self.parent.left = None else:", "that will do this is the node that has the", "to its own position as a child, (left or right)", "methods `find_successor` and `find_min` to find the successor. To remove", "left (or right) child to search, we have found the", "way to handle the insertion of a duplicate key is", "search tree. The lightly shaded nodes indicate the nodes that", "the left child of its parent, then the parent is", "logic for choosing the left or right child as the", "so we begin our implementation with the `put` functionality. 
In", "`find_min` method is called to find the minimum key in", "in tree') def __delitem__(self, key): self.delete(key) \"\"\" Once we’ve found", "in the tree in place of the node to be", "One important problem with our implementation of insert is that", "inorder to enable a `tree[1]` retrieval interface, we overload one", "of children the node has. The `TreeNode` class will also", "`TreeNode` object and insert the object at the point discovered", "our attention to the most challenging method in the binary", "node.left) return self._get(key, node.right) \"\"\" Using `_get`, we can implement", "the process for inserting a new node into a binary", "The lightly shaded nodes indicate the nodes that were visited", "the new key is less than the current node, search", "\"\"\" Using `_get`, we can implement the `in` operation by", "value stored in the val of the node is returned.", "tree in order, using the `inorder` traversal algorithm. However, because", "and set it as the root of the tree. If", "1 return elif self.size == 1 and self.root.key == key:", ") \"\"\" ![Deleting node 25, a node that has a", "__init__(self): self.root = None self.size = 0 def __len__(self): return", "cases to consider. Since the cases are symmetric with respect", "the subtree until it reaches a node that does not", "key in the right subtree. 2. If the node has", "a new node in the tree. The `_put` function is", "handled properly. As our tree is implemented a duplicate key", "# `in` calls `__iter__` so is recursive for elem in", "right): self.key = key self.val = val self.left = left", "to handle the insertion of a duplicate key is for", "valued key in any binary search tree is the leftmost", "only has a single node, that means we are removing", "below illustrates the process for inserting a new node into", "below). If a node has two children, then it is", "node with the new key will never be found during", "tree comparing the new key to the key in the", "insertion process. ![Inserting a node with key = 19](figures/binary-search-tree-put.png) Once", "to the following algorithm: - Starting at the root of", "create a new node with the same key value in", "# -*- coding: utf-8 -*- \"\"\" The `TreeNode` class provides", "besides the val. \"\"\" def __getitem__(self, key): if self.root: result", "of the right child to point to the parent of", "we’ve found the node containing the key we want to", "the parent. The code for this case is shown below.", "assignment interface for our `BinarySearchTree` instances, we override the `__setitem__`", "the `yield` keyword to define our `__iter__` method as a", "in the binary search tree: the deletion of a key.", "the `inorder` traversal algorithm. However, because we want our iterator", "to be deleted. In either case if the key is", "many of these helper functions help to classify a node", "self.right: # recurse again for elem in self.right: yield elem", "three further methods `find_successor`, `find_min` and `splice_out` which you can", "handle the insertion of a duplicate key is for the", "node.parent node.parent.right = promoted_node else: node.replace_node_data( promoted_node.key, promoted_node.val, promoted_node.left, promoted_node.right", "for the `del` operator. One of the more interesting methods", "a node that does not have a left child. \"\"\"", "this decision process may look like: \"\"\" elif node.has_one_child(): promoted_node", "one of Python’s magic methods—in this case `__getitem__`. 
Just like", "method is to handle presence and absence of a root", "explicitly keep track of the parent as an attribute of", "and then update the right child reference of the parent", "function is written recursively following the steps outlined above. Notice", "below. \"\"\" def remove(self, node): if node.is_leaf() and node.parent is", "we overload one of Python’s magic methods—in this case `__getitem__`.", "operate lazily, in this case we use the `yield` keyword", "`BinarySearchTree` class methods much easier. The constructor for a `TreeNode`,", "promoted_node.val, promoted_node.left, promoted_node.right ) \"\"\" ![Deleting node 25, a node", "to be used as a flexible helper method for other", "update the left child reference of the parent to point", "these helper functions help to classify a node according to", "the parent. One important problem with our implementation of insert", "children successor = node.find_successor() if successor: successor.splice_out() node.key = successor.key", "iteration, it really is recursive! Our full implementation of `TreeNode`", "core `get` functionality to `_get`. The search code in the", "node.val = successor.val \"\"\" The code to find the successor", "the helper methods `find_successor` and `find_min` to find the successor.", "handle the third case is shown below. Notice that we", "Finally, we turn our attention to the most challenging method", "= key self.val = val self.left = left self.right =", "subtree. You should convince yourself that the minimum valued key", "we simply search the tree recursively until we get to", "self.root.__iter__() def __setitem__(self, key, val): if self.root: self._put(key, val, self.root)", "interface for our `BinarySearchTree` instances, we override the `__setitem__` magic", "shown below. \"\"\" def remove(self, node): if node.is_leaf() and node.parent", "find the successor is shown above and as you can", "the case where the current node has a left child.", "is for the value associated with the new key to", "node.key == key: return node if key < node.key: return", "with two children](figures/binary-search-tree-delete-3.png) The code to handle the third case", "inserting a new node in the tree. The `_put` function", "= self def __iter__(self): if self is None: return if", "as the root of the tree. If a root node", "the val of the node is returned. Again, inorder to", "is unlikely that we can simply promote one of them", "that there are six cases to consider. Since the cases", "now as we will return to them later when discussing", "self.left) def has_any_children(self): return self.right or self.left def has_both_children(self): return", "have found the position in the tree where the new", "no more than one child, so we know how to", "Now that we have our `TreeNode` class we can begin", "minimum key in a subtree. You should convince yourself that", "key): if self.size > 1: node_to_remove = self._get(key, self.root) if", "In either case if the key is not found the", "is the left child of its parent, then the parent", "the tree only has a single node, that means we", "> 1: node_to_remove = self._get(key, self.root) if node_to_remove: self.remove(node_to_remove) self.size", "the root. Code for this decision process may look like:", "at a way to find the successor shortly. The successor", "lightly shaded nodes indicate the nodes that were visited during", "as a child, (left or right) and the kind of", "the new key will never be found during a search.", "tree: the deletion of a key. 
The first task is", "one child, so we know how to remove it using", "`node` is passed to the new tree as the parent.", "the node has a right child, then the successor is", "has a left child. The decision proceeds as follows: 1.", "right) child to search, we have found the position in", "tree. We call this node the **successor**, and we will", "__getitem__(self, key): if self.root: result = self._get(key, self.root) if result:", "code for this case is shown in the next code", "track of the parent as an attribute of each node.", "binary search tree: the deletion of a key. The first", "its own position as a child, (left or right) and", "node’s place. We can, however, search the tree for a", "use the `yield` keyword to define our `__iter__` method as", "there are three cases that we must consider: 1. The", "a matching key is found, the value stored in the", "the `BinarySearchTree`. The `__contains__` method will simply call `_get` and", "methods `find_successor`, `find_min` and `splice_out` which you can ignore for", "should convince yourself that the minimum valued key in any", "promoted_node.parent = self.parent \"\"\" Now that we have our `TreeNode`", "node.is_right_child(): promoted_node.parent = node.parent node.parent.right = promoted_node else: node.replace_node_data( promoted_node.key,", "`key`, `val`, `left`, and `right` data by calling the `replace_node_data`", "return elif self.size == 1 and self.root.key == key: self.root", "key. The result of this is that the node with", "node has no right child and is the left child", "its parent. The code for this case is shown in", "the tree. If the tree has more than one node", "self.TreeNodeClass(key, val, parent=node) else: if node.right: self._put(key, val, node.right) else:", "in the tree in order. You already know how to", "the insertion process. ![Inserting a node with key = 19](figures/binary-search-tree-put.png)", "has a single node, that means we are removing the", "`put` calls the private, recursive, helper function `_put` to search", "trees that cause an inorder traversal to print out the", "for other `BinarySearchTree` methods that may need to make use", "the minimum key in a subtree. You should convince yourself", "the successor of its parent, excluding this node. The first", "will look at a way to find the successor shortly.", "`__getitem__`, this allows `_get` to be used as a flexible", "on the root. Code for this decision process may look", "3. If the current node has no parent, it must", "2. If the current node is a right child then", "node_to_remove: self.remove(node_to_remove) self.size = self.size - 1 return elif self.size", "to the current node’s left child. 2. If the current", "\"\"\" elif node.has_one_child(): promoted_node = node.left or node.right if node.is_left_child():", "new tree as the parent. One important problem with our", "None else: node.parent.right = None \"\"\" ![Deleting Node 16, a", "previous step. The code below shows the Python code for", "self.size = self.size - 1 return raise KeyError('Error, key not", "self.left: self.left.parent = self if self.right: self.right.parent = self def", "close attention to the `__iter__` implementation as at first glance", "return None if self.is_left_child(): return self.parent self.parent.right = None successor", "node is the right child of its parent, and itself", "a left child. The decision proceeds as follows: 1. 
If", "The result of this is that the node with the", "KeyError def _get(self, key, node): if not node: return None", "a `__contains__` method for the `BinarySearchTree`. The `__contains__` method will", "the tree where the new node should be installed. -", "key to replace the old value. We leave fixing this", "functions, is shown below. As you can see, many of", "key. The first task is to find the node to", "inserting a new node into a binary search tree. The", "how to remove it using the two cases for deletion", "return self._get(key, node.left) return self._get(key, node.right) \"\"\" Using `_get`, we", "val self.left = left self.right = right self.parent = parent", "calls `__iter__` so is recursive for elem in self.left: yield", "be deleted. ![Deleting node 5, a node with two children](figures/binary-search-tree-delete-3.png)", "the node has no right child and is the left", "right changes. We could call `delete` recursively, but then we", "promoted_node promoted_node.parent = self.parent \"\"\" Now that we have our", "of these helper functions help to classify a node according", "the tree, create a new `TreeNode` object and insert the", "be deleted has only one child 3. The node to", "in the next code sample. As you look at this", "not recursive: in fact, because `__iter__` overrides the `for x", "insert the object at the point discovered in the previous", "replace the `key`, `val`, `left`, and `right` data by calling", "left child. 2. If the current node is a right", "a node with two children](figures/binary-search-tree-delete-3.png) The code to handle the", "self.is_leaf(): if self.is_left_child(): self.parent.left = None else: self.parent.right = None", "this case we will just replace the `key`, `val`, `left`,", "a subtree. You should convince yourself that the minimum valued", "1 def _put(self, key, val, node): if key < node.key:", "`TreeNode` besides the val. \"\"\" def __getitem__(self, key): if self.root:", "tree. Therefore the `find_min` method simply follows the `left` references", "a non-matching leaf node or find a matching key. When", "below. \"\"\" def __contains__(self, key): return bool(self._get(key, self.root)) \"\"\" Finally,", "to print out the nodes in the tree from smallest", "return self.size def __iter__(self): return self.root.__iter__() def __setitem__(self, key, val):", "node that will do this is the node that has", "if successor: successor.splice_out() node.key = successor.key node.val = successor.val \"\"\"", "`put`ing to and `get`ing from the tree, so we begin", "successor: 1. If the node has a right child, then", "exercise for you. \"\"\" class BinarySearchTree(object): TreeNodeClass = TreeNode def", "children 2. The node to be deleted has only one", "remove it using the two cases for deletion that we", "define our `__iter__` method as a Python generator. Pay close", "If the current node is a left child then we", "the current node, and then update the left child reference", "then it is unlikely that we can simply promote one", "current node is a right child then we only need", "function `_put` to search the tree according to the following", "to either having a left or right child we will", "cause an inorder traversal to print out the nodes in", "self.parent.right == self def is_leaf(self): return not (self.right or self.left)", "of the existing left and right subtrees. The node that", "changes. We could call `delete` recursively, but then we would", "left child to point to the parent of the current", "the current node. 
If the new key is less than", "as an exercise for you. \"\"\" class BinarySearchTree(object): TreeNodeClass =", "overload one of Python’s magic methods—in this case `__getitem__`. Just", "you look at this code you will see that there", "look at this code you will see that there are", "first glance you might think that the code is not", "our implementation of insert is that duplicate keys are not", "- To add a node to the tree, create a", "in the right subtree of the node having the original", "steps outlined above. Notice that when a new child is", "will see that there are six cases to consider. Since", "= None successor = self.parent.find_successor() self.parent.right = self return successor", "yield elem yield self.key if self.right: # recurse again for", "same properties of binary search trees that cause an inorder", "code for inserting a new node in the tree. The", "is no left (or right) child to search, we have", "As you can see, many of these helper functions help", "to `__getitem__`, this allows `_get` to be used as a", "makes use of the same properties of binary search trees", "the cases are symmetric with respect to either having a", "`__setitem__` magic method. In this method we first check to", "kind of children the node has. The `TreeNode` class will", "the `__iter__` implementation as at first glance you might think", "\"\"\" Once we’ve found the node containing the key we", "__contains__(self, key): return bool(self._get(key, self.root)) \"\"\" Finally, we turn our", "largest. There are three cases to consider when looking for", "either having a left or right child we will just", "core functionality of this class will be to enable `put`ing", "key node. \"\"\" else: # has both children successor =", "one child 3. The node to be deleted has two", "it as the root of the tree. If a root", "The third case is the most difficult case to handle", "from the tree, so we begin our implementation with the", "else: self.root = self.TreeNodeClass(key, val) self.size = self.size + 1", "discovered in the previous step. The code below shows the", "that we have our `TreeNode` class we can begin to", "that we have already implemented. Once the successor has been", "is delete the node and remove the reference to this", "subtree of the node having the original key. The result", "the value associated with the new key to replace the", "To add a node to the tree, create a new", "the successor to this node is the successor of its", "a root node, and delegates the core `get` functionality to", "two cases for deletion that we have already implemented. Once", "three cases to consider when looking for the successor: 1.", "an attribute of each node. You will see why this", "in order. You already know how to traverse a binary", "node is already in place then `put` calls the private,", "If the current node has no parent, it must be", "to see if the tree already has a root. If", "no parent, it must be the root. In this case", "return self.has_any_children() and not self.has_both_children() def replace_node_data(self, key, val, left,", "= self.size - 1 return elif self.size == 1 and", "place then `put` calls the private, recursive, helper function `_put`", "can begin to write `BinarySearchTree` itself. Recall that the core", "of the tree, search the binary tree comparing the new", "either case if the key is not found the `del`", "process. 
![Inserting a node with key = 19](figures/binary-search-tree-put.png) Once the", "if key < node.key: return self._get(key, node.left) return self._get(key, node.right)", "to traverse a binary tree in order, using the `inorder`", "methods of `TreeNode` provides an interface to simply iterate over", "right if self.left: self.left.parent = self if self.right: self.right.parent =", "full implementation of `TreeNode` is provided below. It includes three", "Python’s magic methods—in this case `__getitem__`. Just like with `__setitem__`,", "to handle presence and absence of a root node, and", "we override the `__setitem__` magic method. In this method we", "= successor.key node.val = successor.val \"\"\" The code to find", "as you can see is a method of the `TreeNode`", "left child of its parent, then the parent is the", "a given key. The `get` functionality is even easier than", "the `tree[1] = 'foo'` style assignment interface for our `BinarySearchTree`", "our `BinarySearchTree` instances, we override the `__setitem__` magic method. In", "node should be installed. - To add a node to", "It includes three further methods `find_successor`, `find_min` and `splice_out` which", "not in tree') def __delitem__(self, key): self.delete(key) \"\"\" Once we’ve", "is found, the value stored in the val of the", "to them later when discussing deletion. \"\"\" class TreeNode(object): def", "The `TreeNode` class provides many helper functions that make the", "the tree is constructed, the next task is to implement", "that cause an inorder traversal to print out the nodes", "implementation for the `del` operator. One of the more interesting", "parent=node) else: if node.right: self._put(key, val, node.right) else: node.right =", "found the node containing the key we want to delete,", "more complicated (see below). If a node has only a", "shortly. The successor is guaranteed to have no more than", "an error. \"\"\" def delete(self, key): if self.size > 1:", "simply call `_get` and return `True` if `_get` returns a", "The constructor for a `TreeNode`, along with these helper functions,", "key, val, node): if key < node.key: if node.left: self._put(key,", "challenging method in the binary search tree: the deletion of", "new key will never be found during a search. A", "we know how to remove it using the two cases", "slightly more complicated (see below). If a node has only", "the successor has been removed, we simply put it in", "enable a `tree[1]` retrieval interface, we overload one of Python’s", "If the node has no right child and is the", "Using `_get`, we can implement the `in` operation by writing", "of the node to be deleted. ![Deleting node 5, a", "to the current node’s right child. 3. If the current", "deleted has only one child 3. The node to be", "deleting a node from a binary search tree. The `find_min`", "in place of the node to be deleted. ![Deleting node", "tree in order. You already know how to traverse a", "that were visited during the insertion process. ![Inserting a node", "right child of its parent, and itself has no right", "value. We leave fixing this bug as an exercise for", "TreeNodeClass = TreeNode def __init__(self): self.root = None self.size =", "value associated with the new key to replace the old", "is not found the `del` operator raises an error. \"\"\"", "= None else: promoted_node = self.left or self.right if self.is_left_child():", "is not None: if node == node.parent.left: node.parent.left = None", "`splice_out` which you can ignore for now as we will", "keys in the tree in order. 
You already know how", "`BinarySearchTree`. The `__contains__` method will simply call `_get` and return", "for us when deleting a node from a binary search", "in the right subtree. 2. If the node has no", "straightforward. If the current node has no children all we", "to take the place of its parent. The code for", "Code for this decision process may look like: \"\"\" elif", "current node. If the new key is less than the", "return self._get(key, node.right) \"\"\" Using `_get`, we can implement the", "new key to the key in the current node. If", "we search using the `_get` method to find the `TreeNode`", "node we search using the `_get` method to find the", "as at first glance you might think that the code", "is that it goes directly to the node we want", "a way to find the successor shortly. The successor is", "iterate over all the keys in the tree in order.", "self.parent.right = None successor = self.parent.find_successor() self.parent.right = self return", "method to find the `TreeNode` that needs to be removed.", "is provided below. It includes three further methods `find_successor`, `find_min`", "= self.TreeNodeClass(key, val, parent=node) else: if node.right: self._put(key, val, node.right)", "simply search the tree recursively until we get to a", "subtree. 2. If the node has no right child and", "has_both_children(self): return self.right and self.left def has_one_child(self): return self.has_any_children() and", "self.is_left_child(): self.parent.left = promoted_node else: self.parent.right = promoted_node promoted_node.parent =", "`get` functionality is even easier than the `put` functionality because", "functionality is even easier than the `put` functionality because we", "\"\"\" ![Deleting Node 16, a node without children](figures/binary-search-tree-delete-1.png) The second", "simply follows the `left` references in each node of the", "if node.left: self._put(key, val, node.left) else: node.left = self.TreeNodeClass(key, val,", "only slightly more complicated (see below). If a node has", "the node we want to splice out and makes the", "looking for the successor: 1. If the node has a", "promoted_node.left, promoted_node.right ) \"\"\" ![Deleting node 25, a node that", "for inserting a new node into a binary search tree.", "promoted_node else: node.replace_node_data( promoted_node.key, promoted_node.val, promoted_node.left, promoted_node.right ) \"\"\" ![Deleting", "node is returned. Again, inorder to enable a `tree[1]` retrieval", "recursively until we get to a non-matching leaf node or", "the tree, so we begin our implementation with the `put`", "successor, we make use of the method `splice_out`. The reason", "smallest key in the right subtree. 2. If the node", "than the current node, search the left subtree. If the", "In order to enable the `tree[1] = 'foo'` style assignment", "one that matters for us when deleting a node from", "node’s left child. 2. If the current node is a", "is shown in the next code sample. As you look", "guaranteed to have no more than one child, so we", "the method `splice_out`. The reason we use `splice_out` is that", "no children all we need to do is delete the", "reason we use `splice_out` is that it goes directly to", "def find_successor(self): if self.right: return self.right.find_min() if self.parent is None:", "of other data from the `TreeNode` besides the val. \"\"\"", "the parent as an attribute of each node. 
You will", "we use `splice_out` is that it goes directly to the", "return current def splice_out(self): if self.is_leaf(): if self.is_left_child(): self.parent.left =", "associated with the new key to replace the old value.", "= None else: node.parent.right = None \"\"\" ![Deleting Node 16,", "when discussing deletion. \"\"\" class TreeNode(object): def __init__(self, key, val,", "self.parent.left == self def is_right_child(self): return self.parent and self.parent.right ==", "as a Python generator. Pay close attention to the `__iter__`", "If a root node is already in place then `put`", "that is to be deleted. In either case if the", "delete(self, key): if self.size > 1: node_to_remove = self._get(key, self.root)", "self.root = None self.size = 0 def __len__(self): return self.size", "next task is to implement the retrieval of a value", "One of the more interesting methods of `TreeNode` provides an", "key is for the value associated with the new key", "take the place of its parent. The code for this", "unlikely that we can simply promote one of them to", "consider when looking for the successor: 1. If the node", "properly. As our tree is implemented a duplicate key will", "removing the root of the tree, but we still must", "the `for x in` operation for iteration, it really is", "interesting methods of `TreeNode` provides an interface to simply iterate", "print out the nodes in the tree from smallest to", "2. If the node has no right child and is", "if self.right: return self.right.find_min() if self.parent is None: return None", "a new `TreeNode` object and insert the object at the", "turn our attention to the most challenging method in the", "like with `__setitem__`, the primary purpose of this method is", "object at the point discovered in the previous step. The", "in any binary search tree is the leftmost child of", "def is_right_child(self): return self.parent and self.parent.right == self def is_leaf(self):", "`BinarySearchTree` itself. Recall that the core functionality of this class", "a binary search tree. The `find_min` method is called to", "functions help to classify a node according to its own", "could call `delete` recursively, but then we would waste time", "parent, excluding this node. The first condition is the only", "key is greater than the current node, search the right", "class will also explicitly keep track of the parent as", "each node. You will see why this is important when", "a Python generator. Pay close attention to the `__iter__` implementation", "do is delete the node and remove the reference to", "is constructed, the next task is to implement the retrieval", "elem yield self.key if self.right: # recurse again for elem", "self.is_left_child(): self.parent.left = None else: self.parent.right = None else: promoted_node", "`del` operator raises an error. \"\"\" def delete(self, key): if", "new key is greater than the current node, search the", "0 def __len__(self): return self.size def __iter__(self): return self.root.__iter__() def", "code for this case is shown below. \"\"\" def remove(self,", "successor.splice_out() node.key = successor.key node.val = successor.val \"\"\" The code", "deleted. ![Deleting node 5, a node with two children](figures/binary-search-tree-delete-3.png) The", "elem in self.right: yield elem def find_successor(self): if self.right: return", "the right subtree of the node having the original key.", "new node should be installed. 
- To add a node", "of the parent to point to the current node’s left", "self def __iter__(self): if self is None: return if self.left:", "node.is_left_child(): promoted_node.parent = node.parent node.parent.left = promoted_node elif node.is_right_child(): promoted_node.parent", "the successor shortly. The successor is guaranteed to have no", "None if self.is_left_child(): return self.parent self.parent.right = None successor =", "this node. The first condition is the only one that", "search tree. The `find_min` method is called to find the", "that when a new child is inserted into the tree,", "even easier than the `put` functionality because we simply search", "code in the `_get` method uses the same logic for", "node or find a matching key. When a matching key", "case where the current node has a left child. The", "`find_min` and `splice_out` which you can ignore for now as", "method `splice_out`. The reason we use `splice_out` is that it", "we create a new `TreeNode` and set it as the", "shown below. Notice that we make use of the helper", "self.parent \"\"\" Now that we have our `TreeNode` class we", "the private, recursive, helper function `_put` to search the tree", "be found during a search. A better way to handle", "during a search. A better way to handle the insertion", "the node with the new key will never be found", "the binary search tree relationships for both of the existing", "is implemented a duplicate key will create a new node", "promote the child to take the place of its parent.", "or find a matching key. When a matching key is", "in this case we use the `yield` keyword to define", "decision proceeds as follows: 1. If the current node is", "![Inserting a node with key = 19](figures/binary-search-tree-put.png) Once the tree", "only one child 3. The node to be deleted has", "we want our iterator to operate lazily, in this case", "there is no left (or right) child to search, we", "it using the two cases for deletion that we have", "in self.left: yield elem yield self.key if self.right: # recurse", "node == node.parent.left: node.parent.left = None else: node.parent.right = None", "our iterator to operate lazily, in this case we use", "As our tree is implemented a duplicate key will create", "(self.right or self.left) def has_any_children(self): return self.right or self.left def", "and not self.has_both_children() def replace_node_data(self, key, val, left, right): self.key", "see is a method of the `TreeNode` class. This code", "operation for iteration, it really is recursive! Our full implementation", "already in place then `put` calls the private, recursive, helper", "key, val): if self.root: self._put(key, val, self.root) else: self.root =", "the work done in the `BinarySearchTree` class methods much easier.", "of binary search trees that cause an inorder traversal to", "the new key to the key in the current node.", "key): if self.root: result = self._get(key, self.root) if result: return", "keyword to define our `__iter__` method as a Python generator.", "method in the binary search tree: the deletion of a", "to find the successor shortly. The successor is guaranteed to", "successor. 3. If the node is the right child of", "traversal to print out the nodes in the tree from", "successor is the smallest key in the right subtree. 2.", "matching key. When a matching key is found, the value", "to replace the one scheduled for deletion. What we need", "tree already has a root. 
"""
Now that we have our `TreeNode` class we can begin to write the
`BinarySearchTree` itself. Recall that the core functionality of this class
will be to enable `put`ing to and `get`ing from the tree, so we begin our
implementation with the `put` functionality.

The `put` method first checks to see if the tree already has a root. If
there is not a root then we create a new `TreeNode` and set it as the root
of the tree. If a root node is already in place then `put` calls the
private, recursive, helper function `_put` to search the tree according to
the following algorithm:

- Starting at the root of the tree, search the binary tree comparing the new
  key to the key in the current node. If the new key is less than the
  current node, search the left subtree. If the new key is greater than the
  current node, search the right subtree.
- When there is no left (or right) child to search, we have found the
  position in the tree where the new node should be installed.
- To add a node to the tree, create a new `TreeNode` object and insert the
  object at the point discovered in the previous step.

The `_put` function is written recursively following the steps outlined
above. Notice that when a new child is inserted into the tree, the `node` is
passed to the new node as the parent.
"""


class BinarySearchTree(object):
    TreeNodeClass = TreeNode

    def __init__(self):
        self.root = None
        self.size = 0

    def __len__(self):
        return self.size

    def __iter__(self):
        return self.root.__iter__()

    def put(self, key, val):
        if self.root:
            self._put(key, val, self.root)
        else:
            self.root = self.TreeNodeClass(key, val)
        self.size = self.size + 1

    def _put(self, key, val, node):
        if key < node.key:
            if node.left:
                self._put(key, val, node.left)
            else:
                node.left = self.TreeNodeClass(key, val, parent=node)
        else:
            if node.right:
                self._put(key, val, node.right)
            else:
                node.right = self.TreeNodeClass(key, val, parent=node)

"""
One important problem with our implementation of insert is that duplicate
keys are not handled properly. As our tree is implemented, a duplicate key
will create a new node with the same key value in the right subtree of the
node having the original key. The result of this is that the node with the
new key will never be found during a search. A better way to handle the
insertion of a duplicate key is for the value associated with the new key to
replace the old value. We leave fixing this bug as an exercise for the
reader.

To enable the `tree[key] = 'foo'` style assignment interface for our
`BinarySearchTree` instances, we override the `__setitem__` magic method,
which simply delegates to `put`.
"""

    def __setitem__(self, key, val):
        self.put(key, val)

"""
The diagram below illustrates the process for inserting a new node into a
binary search tree. The lightly shaded nodes indicate the nodes that were
visited during the insertion process.

![Inserting a node with key = 19](figures/binary-search-tree-put.png)

Once the tree is constructed, the next task is to implement the retrieval of
a value for a given key. The `get` functionality is even easier than the
`put` functionality because we simply search the tree recursively until we
get to a non-matching leaf node or find a matching key. When a matching key
is found, the value stored in the `val` of the node is returned.
"""

    def get(self, key):
        if self.root:
            result = self._get(key, self.root)
            if result:
                return result.val
        raise KeyError('Error, key not in tree')

    def _get(self, key, node):
        if not node:
            return None
        if node.key == key:
            return node
        if key < node.key:
            return self._get(key, node.left)
        return self._get(key, node.right)

"""
The search code in the `_get` method uses the same logic for choosing the
left or right child as the `_put` method. Notice that `_get` returns a
`TreeNode` to `__getitem__`; this allows `_get` to be used as a flexible
helper method for other `BinarySearchTree` methods that may need to make use
of other data from the `TreeNode` besides the `val`.

Just like with `__setitem__`, the primary purpose of the next method is to
enable a `tree[1]` retrieval interface: we overload one of Python's magic
methods, in this case `__getitem__`, so that it simply delegates to `get`.
"""

    def __getitem__(self, key):
        return self.get(key)

"""
Using `get`, we can implement the `in` operation by writing a `__contains__`
method for the `BinarySearchTree`. The `__contains__` method simply calls
`_get` and returns `True` if `_get` finds the key, or `False` if it returns
`None`. The code for `__contains__` is shown below.
"""

    def __contains__(self, key):
        return bool(self._get(key, self.root))
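"""
A brief usage sketch (an added illustration; the keys and values below are
hypothetical, not from the original text). It exercises the map-style
interface defined above.
"""

my_tree = BinarySearchTree()
my_tree[17] = 'red'      # __setitem__ delegates to put
my_tree[5] = 'blue'
my_tree[35] = 'yellow'

print(my_tree[5])        # __getitem__ delegates to get, prints 'blue'
print(17 in my_tree)     # __contains__, prints True
print(len(my_tree))      # __len__, prints 3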
"""
Finally, we turn our attention to the most challenging method in the binary
search tree: the deletion of a key. The first task is to find the node to
delete by searching the tree. If the tree has more than one node we search
using the `_get` method to find the `TreeNode` that needs to be removed. If
the tree only has a single node, that means we are removing the root of the
tree, but we still must check to make sure the key of the root matches the
key that is to be deleted. In either case if the key is not found the `del`
operator raises an error.
"""

    def delete(self, key):
        if self.size > 1:
            node_to_remove = self._get(key, self.root)
            if node_to_remove:
                self.remove(node_to_remove)
                self.size = self.size - 1
                return
        elif self.size == 1 and self.root.key == key:
            self.root = None
            self.size = self.size - 1
            return
        raise KeyError('Error, key not in tree')

    def __delitem__(self, key):
        self.delete(key)

"""
Once we've found the node containing the key we want to delete, there are
three cases that we must consider:

1. The node to be deleted has no children
2. The node to be deleted has only one child
3. The node to be deleted has two children

The first case is straightforward. If the current node has no children, all
we need to do is delete the node and remove the reference to this node in
the parent. The code for this case is shown below.
"""

    def remove(self, node):
        if node.is_leaf():
            if node == node.parent.left:
                node.parent.left = None
            else:
                node.parent.right = None

"""
![Deleting Node 16, a node without children](figures/binary-search-tree-delete-1.png)

The second case is only slightly more complicated (see below). If a node has
only a single child, then we can simply promote the child to take the place
of its parent. If you look at this code you will see that there are six
cases to consider. Since the cases are symmetric with respect to either
having a left or right child, we will just discuss the case where the
current node has a left child. The decision proceeds as follows:

1. If the current node is a left child then we only need to update the
   parent reference of the left child to point to the parent of the current
   node, and then update the left child reference of the parent to point to
   the current node's left child.
2. If the current node is a right child then we only need to update the
   parent reference of the left child to point to the parent of the current
   node, and then update the right child reference of the parent to point to
   the current node's left child.
3. If the current node has no parent, it must be the root. In this case we
   just replace the key, val, left, and right data by calling the
   `replace_node_data` method on the root.
"""

        # continuation of remove(): the node has a single child
        elif node.has_one_child():
            promoted_node = node.left or node.right
            if node.is_left_child():
                promoted_node.parent = node.parent
                node.parent.left = promoted_node
            elif node.is_right_child():
                promoted_node.parent = node.parent
                node.parent.right = promoted_node
            else:
                node.replace_node_data(
                    promoted_node.key, promoted_node.val,
                    promoted_node.left, promoted_node.right
                )

"""
![Deleting node 25, a node that has a single child](figures/binary-search-tree-delete-2.png)

The third case is the most difficult case to handle (see below). If a node
has two children, then it is unlikely that we can simply promote one of them
to take the node's place. We can, however, search the tree for a node that
can be used to replace the one scheduled for deletion. What we need is a
node that will preserve the binary search tree relationships for both of the
existing left and right subtrees. The node that will do this is the node
that has the next-largest key in the tree. We call this node the successor,
and we will look at a way to find the successor shortly. The successor is
guaranteed to have no more than one child, so we know how to remove it using
the two cases for deletion that we have already implemented. Once the
successor has been removed, we simply put it in the tree in place of the
node to be deleted.

![Deleting node 5, a node with two children](figures/binary-search-tree-delete-3.png)

The code to handle the third case is shown below. Notice that we make use of
the helper methods `find_successor` and `splice_out` from the `TreeNode`
class. The reason we use `splice_out` is that it goes directly to the node
we want to splice out and makes the right changes. We could call `delete`
recursively, but then we would waste time re-searching for the key node.
"""

        # continuation of remove(): the node has both children
        else:
            successor = node.find_successor()
            if successor:
                successor.splice_out()
                node.key = successor.key
                node.val = successor.val

"""
The code to find the successor was shown above as a method of the `TreeNode`
class. It makes use of the same properties of binary search trees that cause
an inorder traversal to print out the nodes in the tree from smallest to
largest. There are three cases to consider when looking for the successor:

1. If the node has a right child, then the successor is the smallest key in
   the right subtree.
2. If the node has no right child and is the left child of its parent, then
   the parent is the successor.
3. If the node is the right child of its parent, and itself has no right
   child, then the successor to this node is the successor of its parent,
   excluding this node.

The first condition is the only one that matters for us when deleting a node
from a binary search tree.

The `find_min` method is called to find the minimum key in a subtree. You
should convince yourself that the minimum valued key in any binary search
tree is the leftmost child of the tree. Therefore the `find_min` method
simply follows the `left` references in each node of the subtree until it
reaches a node that does not have a left child.
"""
[ "if inspect.isclass(component) or inspect.isroutine(component): # The component is a class", "remaining_args[1:] filename = None lineno = None component_trace.AddAccessedProperty( component, target,", "show_help = parsed_flag_args.help show_trace = parsed_flag_args.trace # component can be", "line is fewer than eg 120 characters. if isinstance(result, six.string_types):", "2.0 (the \"License\"); # you may not use this file", "executed. If not supplied, then the command is taken from", "Fire command cannot be executed. These exceptions are not raised", "of the function call. consumed_args: The args that were consumed", "been explicitly specified. if remaining_args and accepts_positional_args: # Use a", "try to access a member. arg = remaining_args[0] try: index", "Fire CLI.\"\"\" return completion.Script(name, component) class FireError(Exception): \"\"\"Exception used by", "is determined by the remaining arguments. Args: value: The string", "= [] if not args: return kwargs, remaining_kwargs, remaining_args skip_argument", "['-h', '--help']: if help_flag in component_trace.elements[-1].args: command = '{cmd} --", "TODO: Ensure line is fewer than eg 120 characters. if", "fn_args. This returns the values of the args as strings.", "is updated by using the command arguments to either access", "stdout. --separator SEPARATOR: Use SEPARATOR in place of the default", "from fire import completion from fire import decorators from fire", "or kwargs names. fn_keywords: The argument name for **kwargs, or", "initial_component) component_trace.AddCompletionScript(script) if interactive: variables = context.copy() if name is", "value is not None: # A value is specified at", "is None: # Use the command line args by default", "example, to get help for a command you might run:", "go after a separating \"--\". For example, to get help", "from SystemExit, so clients may explicitly catch it with `except", "caller[0] caller_globals = caller_frame.f_globals caller_locals = caller_frame.f_locals context = {}", "but maybe it is as # another type. # TODO:", "that component, consuming one arg in the process. Args: component:", "only ever be consumed up to a separator; a single", "the resulting current component is the final result. Raises: ValueError:", "remaining_args, capacity = _CallCallable( component, remaining_args) # Update the trace.", "component as the current component. 2a. If the current component", "result of executing the Fire command. Execution begins with the", "the next. got_argument = False keyword = argument[2:] contains_equals =", "to consume the argument as a keyword arg, we either:", "The proper way to show help is {cmd}.\\n' 'Showing help", "--trace: Get the Fire Trace for the command. \"\"\" from", "Fire. args: A list of args to consume in Firing", "result = fn(*varargs, **kwargs) return result, consumed_args, remaining_args, capacity def", "to a single line string.\"\"\" # TODO: Ensure line is", "'_'. ] for arg_name in arg_names: if arg_name in members:", "or os.path.basename(sys.argv[0]) # Get args as a list. if isinstance(command,", "move serialization to it's own module. result = component_trace.GetResult() if", "becomes the result of the preceding operation. 
For example \"command", "anyway.\\n').format(cmd=pipes.quote(command)), file=sys.stderr) print('Fire trace:\\n{trace}\\n'.format(trace=component_trace), file=sys.stderr) result = component_trace.GetResult() print( helputils.HelpString(result,", "= [] used_separator = False if separator in remaining_args: #", "that were consumed for the function call. remaining_args: The remaining", "is not None: # A value is specified at the", "of the supplied args that have not been used yet.", "function for. Returns: A parse function for fn. The parse", "keywords to values. remaining_kwargs: A list of the unused kwargs", "contains_equals: keyword, value = keyword.split('=', 1) got_argument = True elif", "args from args. Args: fn: The function to call or", "result.items() if _ComponentVisible(key, verbose)} if not result: return '{}' longest_key", "into a command line interface. Simply call the Fire function", "by Fire when a Fire command cannot be executed. These", "if fn_spec.varargs or fn_spec.varkw: # If we're allowed *varargs or", "remaining_args[separator_index + 1:] remaining_args = remaining_args[:separator_index] used_separator = True assert", "leftover args from the arguments to the parse function. \"\"\"", "component_trace if used_separator: # Add back in the arguments from", "arguments, evaluating functions, and instantiating classes as it goes. When", "FireTrace of components starting with component, tracing Fire's execution path", "elif isinstance(component, dict) and remaining_args: # The component is a", "License for the specific language governing permissions and # limitations", "Flags, common to all Fire CLIs, must go after a", "kwargs.items(): kwargs[key] = _ParseValue(value, None, key, metadata) return parsed_args, kwargs,", "from the fn metadata applies here. parse_fns = metadata.get(decorators.FIRE_PARSE_FNS) if", "separator defaults to a hyphen (-), and can be overwritten", "separator = parsed_flag_args.separator show_completion = parsed_flag_args.completion show_help = parsed_flag_args.help show_trace", "(int) Exit code for the Fire CLI. component_trace: (FireTrace) The", "lines = [] for key, value in result.items(): line =", "parsed_args.append(value) else: # No value has been explicitly specified. if", "return '\\n'.join(lines) def _ComponentVisible(component, verbose=False): \"\"\"Returns whether a component should", "might run: `command -- --help`. The available flags for all", "= component_trace.GetResult() print( helputils.HelpString(result, component_trace, component_trace.verbose), file=sys.stderr) raise FireExit(2, component_trace)", "separator=separator, verbose=verbose, show_help=show_help, show_trace=show_trace) instance = None remaining_args = args", "else: # It was an unnecessary separator. remaining_args = saved_args", "maybe it is as # another type. 
def CompletionScript(name, component):
  """Returns the text of the Bash completion script for a Fire CLI."""
  return completion.Script(name, component)


class FireError(Exception):
  """Exception used by Fire when a Fire command cannot be executed.

  These exceptions are not raised by the Fire function, but rather are caught
  and added to the FireTrace.
  """


class FireExit(SystemExit):
  """An exception raised by Fire to the client in the case of a FireError.

  The trace of the Fire program is available on the `trace` property.

  This exception inherits from SystemExit, so clients may explicitly catch it
  with `except SystemExit` or `except FireExit`. If not caught, this exception
  will cause the client program to exit without a stacktrace.
  """

  def __init__(self, code, component_trace):
    """Constructs a FireExit exception.

    Args:
      code: (int) Exit code for the Fire CLI.
      component_trace: (FireTrace) The trace for the Fire command.
    """
    super(FireExit, self).__init__(code)
    self.trace = component_trace


def _PrintResult(component_trace, verbose=False):
  """Prints the result of the Fire call to stdout in a human readable way."""
  # TODO: Design human readable deserializable serialization method
  # and move serialization to its own module.
  result = component_trace.GetResult()

  if isinstance(result, (list, set, types.GeneratorType)):
    for i in result:
      print(_OneLineResult(i))
  elif inspect.isgeneratorfunction(result):
    raise NotImplementedError
  elif isinstance(result, dict):
    print(_DictAsString(result, verbose))
  elif isinstance(result, tuple):
    print(_OneLineResult(result))
  elif isinstance(result,
                  (bool, six.string_types, six.integer_types, float, complex)):
    print(result)
  elif result is not None:
    print(helputils.HelpString(result, component_trace, verbose))


def _DictAsString(result, verbose=False):
  """Returns a dict as a string.

  Args:
    result: The dict to convert to a string.
    verbose: Whether to include 'hidden' members, those keys starting with _.
  Returns:
    A string representing the dict.
  """
  result = {key: value for key, value in result.items()
            if _ComponentVisible(key, verbose)}

  if not result:
    return '{}'

  longest_key = max(len(str(key)) for key in result.keys())
  format_string = '{{key:{padding}s}} {{value}}'.format(padding=longest_key + 1)

  lines = []
  for key, value in result.items():
    line = format_string.format(key=str(key) + ':',
                                value=_OneLineResult(value))
    lines.append(line)
  return '\n'.join(lines)


def _ComponentVisible(component, verbose=False):
  """Returns whether a component should be visible in the output."""
  return (
      verbose
      or not isinstance(component, six.string_types)
      or not component.startswith('_'))


def _OneLineResult(result):
  """Returns result serialized to a single line string."""
  # TODO: Ensure line is fewer than eg 120 characters.
  if isinstance(result, six.string_types):
    return str(result).replace('\n', ' ')

  try:
    # Serialize to JSON when possible.
    return json.dumps(result)
  except (TypeError, ValueError):
    return str(result).replace('\n', ' ')
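# Added illustration (hypothetical values, not part of core.py):
# _OneLineResult prefers JSON, so _OneLineResult([1, 2]) == '[1, 2]', while
# strings have newlines flattened: _OneLineResult('a\nb') == 'a b'.
# _DictAsString pads keys so values align; {'id': 1, 'name': 'x'} renders as:
#
#   id:   1
#   name: x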
def _Fire(component, args, context, name=None):
  """Execute a Fire command on a target component using the args supplied.

  Arguments that come after a final isolated '--' are treated as Flags, eg
  for interactive mode or completion script generation. Other arguments are
  consumed by the execution of the Fire command, eg in the traversal of the
  members of the component, or in calling a function or instantiating a class
  found during the traversal.

  The steps performed by this method are:

  1. Parse any Flag args (the args after the final --).

  2. Start with component as the current component.
  2a. If the current component is a class, instantiate it using args from args.
  2b. If the current component is a routine, call it using args from args.
  2c. Otherwise access a member from component using an arg from args.
  2d. Repeat 2a-2c until no args remain.

  3a. Embed into ipython REPL if interactive mode is selected.
  3b. Generate a completion script if that flag is provided.

  In step 2, arguments will only ever be consumed up to a separator; a single
  step will never consume arguments from both sides of a separator. The
  separator defaults to a hyphen (-), and can be overwritten with the
  --separator Fire argument.

  Args:
    component: The target component for Fire.
    args: A list of args to consume in Firing on the component, usually from
        the command line.
    context: A dict with the local and global variables available at the call
        to Fire.
    name: Optional. The name of the command. Used in interactive mode and in
        the tab completion script.
  Returns:
    FireTrace of components starting with component, tracing Fire's execution
        path as it consumes args.
  Raises:
    ValueError: If there are arguments that cannot be consumed.
    ValueError: If --completion is specified but no name available.
  """
  args, flag_args = parser.SeparateFlagArgs(args)

  argparser = parser.CreateParser()
  parsed_flag_args, unused_args = argparser.parse_known_args(flag_args)
  verbose = parsed_flag_args.verbose
  interactive = parsed_flag_args.interactive
  separator = parsed_flag_args.separator
  show_completion = parsed_flag_args.completion
  show_help = parsed_flag_args.help
  show_trace = parsed_flag_args.trace

  # component can be a module, class, routine, object, etc.
  if component is None:
    component = context

  initial_component = component
  component_trace = trace.FireTrace(
      initial_component=initial_component, name=name, separator=separator,
      verbose=verbose, show_help=show_help, show_trace=show_trace)

  instance = None
  remaining_args = args
  while True:
    last_component = component
    initial_args = remaining_args

    if not remaining_args and (show_help or interactive or show_trace
                               or show_completion):
      # Don't initialize the final class or call the final function unless
      # there's a separator after it.
      break

    saved_args = []
    used_separator = False
    if separator in remaining_args:
      # For the current component, only use arguments up to the separator.
      separator_index = remaining_args.index(separator)
      saved_args = remaining_args[separator_index + 1:]
      remaining_args = remaining_args[:separator_index]
      used_separator = True
    assert separator not in remaining_args

    if inspect.isclass(component) or inspect.isroutine(component):
      # The component is a class or a routine; we'll try to initialize it or
      # call it.
      isclass = inspect.isclass(component)

      try:
        target = component.__name__
        filename, lineno = inspectutils.GetFileAndLine(component)

        component, consumed_args, remaining_args, capacity = _CallCallable(
            component, remaining_args)

        # Update the trace.
        if isclass:
          component_trace.AddInstantiatedClass(
              component, target, consumed_args, filename, lineno, capacity)
        else:
          component_trace.AddCalledRoutine(
              component, target, consumed_args, filename, lineno, capacity)
      except FireError as error:
        component_trace.AddError(error, initial_args)
        return component_trace

      if last_component is initial_component:
        # If the initial component is a class, keep an instance for use with -i.
        instance = component

    elif isinstance(component, (list, tuple)) and remaining_args:
      # The component is a tuple or list; we'll try to access a member.
      arg = remaining_args[0]
      try:
        index = int(arg)
        component = component[index]
      except (ValueError, IndexError):
        error = FireError(
            'Unable to index into component with argument:', arg)
        component_trace.AddError(error, initial_args)
        return component_trace

      remaining_args = remaining_args[1:]
      filename = None
      lineno = None
      component_trace.AddAccessedProperty(
          component, index, [arg], filename, lineno)

    elif isinstance(component, dict) and remaining_args:
      # The component is a dict; we'll try to access a member.
      target = remaining_args[0]
      if target in component:
        component = component[target]
      elif target.replace('-', '_') in component:
        component = component[target.replace('-', '_')]
      else:
        # The target isn't present in the dict as a string, but maybe it is as
        # another type.
        # TODO: Consider alternatives for accessing non-string keys.
        found_target = False
        for key, value in component.items():
          if target == str(key):
            component = value
            found_target = True
            break
        if not found_target:
          error = FireError(
              'Cannot find target in dict:', target, component)
          component_trace.AddError(error, initial_args)
          return component_trace

      remaining_args = remaining_args[1:]
      filename = None
      lineno = None
      component_trace.AddAccessedProperty(
          component, target, [target], filename, lineno)

    elif remaining_args:
      # We'll try to access a member of the component.
      try:
        target = remaining_args[0]
        component, consumed_args, remaining_args = _GetMember(
            component, remaining_args)
        filename, lineno = inspectutils.GetFileAndLine(component)
        component_trace.AddAccessedProperty(
            component, target, consumed_args, filename, lineno)
      except FireError as error:
        component_trace.AddError(error, initial_args)
        return component_trace

    if used_separator:
      # Add back in the arguments from after the separator.
      if remaining_args:
        remaining_args = remaining_args + [separator] + saved_args
      elif (inspect.isclass(last_component)
            or inspect.isroutine(last_component)):
        remaining_args = saved_args
        component_trace.AddSeparator()
      elif component is not last_component:
        remaining_args = [separator] + saved_args
      else:
        # It was an unnecessary separator.
        remaining_args = saved_args

    if component is last_component and remaining_args == initial_args:
      # We're making no progress.
      break

  if remaining_args:
    component_trace.AddError(
        FireError('Could not consume arguments:', remaining_args),
        initial_args)
    return component_trace

  if show_completion:
    if name is None:
      raise ValueError('Cannot make completion script without command name')
    script = CompletionScript(name, initial_component)
    component_trace.AddCompletionScript(script)

  if interactive:
    variables = context.copy()

    if name is not None:
      variables[name] = initial_component
    variables['component'] = initial_component
    variables['result'] = component
    variables['trace'] = component_trace

    if instance is not None:
      variables['self'] = instance

    interact.Embed(variables, verbose)

    component_trace.AddInteractiveMode()

  return component_trace
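# Added illustration of the separator handling above (hypothetical command,
# not part of core.py). For `tool make-list a b - upper`, with '-' as the
# separator, the call that builds the list only consumes 'a' and 'b'; the
# args after the separator ('upper') are saved in saved_args and restored
# once the call completes, so they apply to the result of the call rather
# than being passed into it.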
def _GetMember(component, args):
  """Returns a subcomponent of component by consuming an arg from args.

  Given a starting component and args, this function gets a member from that
  component, consuming one arg in the process.

  Args:
    component: The component from which to get a member.
    args: Args from which to consume in the search for the next component.
  Returns:
    component: The component that was found by consuming an arg.
    consumed_args: The args that were consumed by getting this member.
    remaining_args: The remaining args that haven't been consumed yet.
  Raises:
    FireError: If we cannot consume an argument to get a member.
  """
  members = dict(inspect.getmembers(component))
  arg = args[0]
  arg_names = [
      arg,
      arg.replace('-', '_'),  # treat '-' as '_'.
  ]

  for arg_name in arg_names:
    if arg_name in members:
      return members[arg_name], [arg], args[1:]

  raise FireError('Could not consume arg:', arg)


def _CallCallable(fn, args):
  """Calls the function fn by consuming args from args.

  Args:
    fn: The function to call or class to instantiate.
    args: Args from which to consume for calling the function.
  Returns:
    component: The object that is the result of the function call.
    consumed_args: The args that were consumed for the function call.
    remaining_args: The remaining args that haven't been used yet.
    capacity: Whether the call could have taken args in place of defaults.
  """
  parse = _MakeParseFn(fn)
  (varargs, kwargs), consumed_args, remaining_args, capacity = parse(args)

  result = fn(*varargs, **kwargs)
  return result, consumed_args, remaining_args, capacity


def _MakeParseFn(fn):
  """Creates a parse function for fn.

  Args:
    fn: The function or class to create the parse function for.
  Returns:
    A parse function for fn. The parse function accepts a list of arguments
    and returns (varargs, kwargs), remaining_args. The original function fn
    can then be called with fn(*varargs, **kwargs). The remaining_args are
    the leftover args from the arguments to the parse function.
  """
  fn_spec = inspectutils.GetFullArgSpec(fn)
  all_args = fn_spec.args + fn_spec.kwonlyargs
  metadata = decorators.GetMetadata(fn)

  # Note: num_required_args is the number of positional arguments without
  # default values. All of these arguments are required.
  num_required_args = len(fn_spec.args) - len(fn_spec.defaults)
  required_kwonly = set(fn_spec.kwonlyargs) - set(fn_spec.kwonlydefaults)

  def _ParseFn(args):
    """Parses the list of `args` into (varargs, kwargs), remaining_args."""
    kwargs, remaining_kwargs, remaining_args = _ParseKeywordArgs(
        args, all_args, fn_spec.varkw)

    # Note: _ParseArgs modifies kwargs.
    parsed_args, kwargs, remaining_args, capacity = _ParseArgs(
        fn_spec.args, fn_spec.defaults, num_required_args, kwargs,
        remaining_args, metadata)

    if fn_spec.varargs or fn_spec.varkw:
      # If we're allowed *varargs or **kwargs, there's always capacity.
      capacity = True

    extra_kw = set(kwargs) - set(fn_spec.kwonlyargs)
    if fn_spec.varkw is None and extra_kw:
      raise FireError('Unexpected kwargs present:', extra_kw)

    missing_kwonly = set(required_kwonly) - set(kwargs)
    if missing_kwonly:
      raise FireError('Missing required flags:', missing_kwonly)

    # If we accept *varargs, then use all remaining arguments for *varargs.
    if fn_spec.varargs is not None:
      varargs, remaining_args = remaining_args, []
    else:
      varargs = []

    for index, value in enumerate(varargs):
      varargs[index] = _ParseValue(value, None, None, metadata)

    varargs = parsed_args + varargs
    remaining_args += remaining_kwargs

    consumed_args = args[:len(args) - len(remaining_args)]
    return (varargs, kwargs), consumed_args, remaining_args, capacity

  return _ParseFn
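# Added sketch (hypothetical function, not part of core.py) showing how the
# pieces fit together. For:
#
#   def greet(name, greeting='Hello'):
#     return greeting + ', ' + name
#
#   parse = _MakeParseFn(greet)
#   (varargs, kwargs), consumed, remaining, capacity = parse(['World'])
#
# the expected result, assuming the default function metadata, is
# varargs == ['World', 'Hello'] (the unused default is filled in),
# kwargs == {}, consumed == ['World'], remaining == [], and capacity is True
# because another positional arg could have replaced the default.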
Returns: value, parsed into the appropriate type", "The available flags for all Fire CLIs are: -v --verbose:", "[] else: varargs = [] for index, value in enumerate(varargs):", "of the unused arguments from the original args. \"\"\" kwargs", "not found_target: error = FireError( 'Cannot find target in dict:',", "the number of positional arguments without # default values. All", "by using the command arguments to either access a member", "must go after a separating \"--\". For example, to get", "main method includes a call to Fire. Eg: def main(argv):", "in enumerate(args): if skip_argument: skip_argument = False continue arg_consumed =", "to see if any parse function from the fn metadata", "= max(len(str(key)) for key in result.keys()) format_string = '{{key:{padding}s}} {{value}}'.format(padding=longest_key", "KIND, either express or implied. # See the License for", "from both sides of a separator. The separator defaults to", "FireError('Unexpected kwargs present:', extra_kw) missing_kwonly = set(required_kwonly) - set(kwargs) if", "import helputils from fire import inspectutils from fire import interact", "typically from Fire decorators. Returns: parsed_args: A list of values", "is preferred. name: Optional. The name of the command as", "a string or a list of strings; a list of", "if isinstance(command, six.string_types): args = shlex.split(command) elif isinstance(command, (list, tuple)):", "CLI command is run by consuming the arguments in the", "keyword.split('=', 1) got_argument = True elif is_bool_syntax: # Since there's", "of the members of the component, or in calling a", "Fire to the client in the case of a FireError.", "or `except FireExit`. If not caught, this exception will cause", "args, all_args, fn_spec.varkw) # Note: _ParseArgs modifies kwargs. parsed_args, kwargs,", "result = {key: value for key, value in result.items() if", "number of required arguments from the function's argspec. This is", "index, value in enumerate(varargs): varargs[index] = _ParseValue(value, None, None, metadata)", "a Python REPL after running the command. --completion: Write the", "we need to be # accepting **kwargs. if got_argument: skip_argument", "= parse_fns['named'] if index is not None and 0 <=", "Google Inc. # # Licensed under the Apache License, Version", "None: varargs, remaining_args = remaining_args, [] else: varargs = []", "is None: component = context initial_component = component component_trace =", "finds argument names used by the function, specified through fn_args.", "else: varargs = [] for index, value in enumerate(varargs): varargs[index]", "current component (if it's a function), or instantiate the current", "appropriate type. The function used to parse value is determined", "of required arguments from the function's argspec. This is the", "Use the command line args by default if no command", "remaining_args: # For the current component, only use arguments up", "(the \"License\"); # you may not use this file except", "code: (int) Exit code for the Fire CLI. component_trace: (FireTrace)", "keys starting with _. Returns: A string representing the dict", "return result, consumed_args, remaining_args, capacity def _MakeParseFn(fn): \"\"\"Creates a parse", "module. result = component_trace.GetResult() if isinstance(result, (list, set, types.GeneratorType)): for", "in remaining_args: # For the current component, only use arguments", "fire import decorators from fire import helputils from fire import", "etc. 
if component is None: component = context initial_component =", "None lineno = None component_trace.AddAccessedProperty( component, target, [target], filename, lineno)", "arguments from both sides of a separator. The separator defaults", "line. Used in interactive mode and for generating the completion", "elif is_bool_syntax: # Since there's no next arg or the", "if show_completion: if name is None: raise ValueError('Cannot make completion", "fn_spec.args + fn_spec.kwonlyargs metadata = decorators.GetMetadata(fn) # Note: num_required_args is", "_ParseKeywordArgs( args, all_args, fn_spec.varkw) # Note: _ParseArgs modifies kwargs. parsed_args,", "method are: 1. Parse any Flag args (the args after", "= [] for index, arg in enumerate(fn_args): value = kwargs.pop(arg,", "class FireError(Exception): \"\"\"Exception used by Fire when a Fire command", "= parser.CreateParser() parsed_flag_args, unused_args = argparser.parse_known_args(flag_args) verbose = parsed_flag_args.verbose interactive", "# # Unless required by applicable law or agreed to", "classes, modules, objects, dictionaries, lists, tuples, etc. They all work!", "initial_component=initial_component, name=name, separator=separator, verbose=verbose, show_help=show_help, show_trace=show_trace) instance = None remaining_args", "types.GeneratorType)): for i in result: print(_OneLineResult(i)) elif inspect.isgeneratorfunction(result): raise NotImplementedError", "FireError as error: component_trace.AddError(error, initial_args) return component_trace if last_component is", "arg. consumed_args: The args that were consumed by getting this", "the FireTrace. \"\"\" class FireExit(SystemExit): \"\"\"An exception raised by Fire", "Fire Trace for the command. \"\"\" from __future__ import absolute_import", "main entrypoint for Python Fire. Executes a command either from", "been consumed yet. capacity: Whether the call could have taken", "argspec. This is the number of arguments without a default", "a FireExit with code 2. When used with the help", "capacity) else: component_trace.AddCalledRoutine( component, target, consumed_args, filename, lineno, capacity) except", "flags, Fire will raise a FireExit with code 0 if", "= set(fn_spec.kwonlyargs) - set(fn_spec.kwonlydefaults) def _ParseFn(args): \"\"\"Parses the list of", "argument in enumerate(args): if skip_argument: skip_argument = False continue arg_consumed", "exit without a stacktrace. \"\"\" def __init__(self, code, component_trace): \"\"\"Constructs", "inspectutils.GetFileAndLine(component) component, consumed_args, remaining_args, capacity = _CallCallable( component, remaining_args) #", "component. 2a. If the current component is a class, instantiate", "to access a member. arg = remaining_args[0] try: index =", "script. Returns: The result of executing the Fire command. Execution", "args supplied. Arguments that come after a final isolated '--'", "separator. remaining_args = saved_args if component is last_component and remaining_args", "or args[index + 1].startswith('--'))) if contains_equals: keyword, value = keyword.split('=',", "stdout in a human readable way.\"\"\" # TODO: Design human", "component_trace: (FireTrace) The trace for the Fire command. \"\"\" super(FireExit,", "library for creating CLIs from absolutely any Python object. You", "can call Fire on any Python object: functions, classes, modules,", "from the command line. context: A dict with the local", "def _ParseFn(args): \"\"\"Parses the list of `args` into (varargs, kwargs),", "alternatives for accessing non-string keys. 
found_target = False for key,", "which may still be used as positional arguments. metadata: Metadata", "implied. # See the License for the specific language governing", "the args for which there's no default value. # There's", "Constructs and returns a dictionary of these keyword arguments, and", "return result def CompletionScript(name, component): \"\"\"Returns the text of the", "this is the command executed. If not supplied, then the", "the parse function. \"\"\" fn_spec = inspectutils.GetFullArgSpec(fn) all_args = fn_spec.args", "the arguments to the parse function. \"\"\" fn_spec = inspectutils.GetFullArgSpec(fn)", "show_completion: if name is None: raise ValueError('Cannot make completion script", "result, consumed_args, remaining_args, capacity def _MakeParseFn(fn): \"\"\"Creates a parse function", "a list. if isinstance(command, six.string_types): args = shlex.split(command) elif isinstance(command,", "target component using the args supplied. Arguments that come after", "and remaining_args == initial_args: # We're making no progress. break", "all work! Python Fire turns any Python object into a", "text of the Bash completion script for a Fire CLI.\"\"\"", "_DictAsString(result, verbose=False): \"\"\"Returns a dict as a string. Args: result:", "= fn_spec.args + fn_spec.kwonlyargs metadata = decorators.GetMetadata(fn) # Note: num_required_args", "target, consumed_args, filename, lineno, capacity) else: component_trace.AddCalledRoutine( component, target, consumed_args,", "remaining_kwargs, remaining_args skip_argument = False for index, argument in enumerate(args):", "traversing the target object `component`'s members consuming arguments, evaluating functions,", "function or class to create the parse function for. Returns:", "= args while True: last_component = component initial_args = remaining_args", "are required. num_required_args = len(fn_spec.args) - len(fn_spec.defaults) required_kwonly = set(fn_spec.kwonlyargs)", "keyword arguments. Given a list of arguments, finds occurences of", "helputils.HelpString(result, component_trace, component_trace.verbose), file=sys.stderr) raise FireExit(0, component_trace) elif component_trace.show_trace: print('Fire", "completion.Script(name, component) class FireError(Exception): \"\"\"Exception used by Fire when a", "CLIs, must go after a separating \"--\". For example, to", "is run by consuming the arguments in the command in", "args = shlex.split(command) elif isinstance(command, (list, tuple)): args = command", "to Fire. name: Optional. The name of the command. Used", "the function call. remaining_args: The remaining args that haven't been", "from sys.argv instead. This can be a string or a", "fn_spec.varkw: # If we're allowed *varargs or **kwargs, there's always", "consumed_args = args[:len(args) - len(remaining_args)] return (varargs, kwargs), consumed_args, remaining_args,", "number of arguments without a default value. kwargs: Dict with", "args[0] arg_names = [ arg, arg.replace('-', '_'), # treat '-'", "# The component is a tuple or list; we'll try", "[] for index, value in enumerate(varargs): varargs[index] = _ParseValue(value, None,", "of the default separator, '-'. --trace: Get the Fire Trace", "return component_trace if show_completion: if name is None: raise ValueError('Cannot", "for key, value in component.items(): if target == str(key): component", "keyword and 'value' as the value. 
def Fire(component=None, command=None, name=None):
  """This function, Fire, is the main entrypoint for Python Fire.

  Executes a command either from the `command` argument or from sys.argv by
  recursively traversing the target object `component`'s members consuming
  arguments, evaluating functions, and instantiating classes as it goes.

  When building a CLI with Fire, your main method should call this function.

  Args:
    component: The initial target component.
    command: Optional. If supplied, this is the command executed. If not
        supplied, then the command is taken from sys.argv instead. This can be
        a string or a list of strings; a list of strings is preferred.
    name: Optional. The name of the command as entered at the command line.
        Used in interactive mode and for generating the completion script.
  Returns:
    The result of executing the Fire command. Execution begins with the
    initial target component. The component is updated by using the command
    arguments to either access a member of the current component, call the
    current component (if it's a function), or instantiate the current
    component (if it's a class). When all arguments are consumed and there's
    no function left to call or class left to instantiate, the resulting
    current component is the final result.
  Raises:
    ValueError: If the command argument is supplied, but not a string or a
        sequence of arguments.
    FireExit: When Fire encounters a FireError, Fire will raise a FireExit
        with code 2. When used with the help or trace flags, Fire will raise
        a FireExit with code 0 if successful.
  """
  name = name or os.path.basename(sys.argv[0])

  # Get args as a list.
  if isinstance(command, six.string_types):
    args = shlex.split(command)
  elif isinstance(command, (list, tuple)):
    args = command
  elif command is None:
    # Use the command line args by default if no command is specified.
    args = sys.argv[1:]
  else:
    raise ValueError('The command argument must be a string or a sequence of '
                     'arguments.')

  # Determine the calling context.
  caller = inspect.stack()[1]
  caller_frame = caller[0]
  caller_globals = caller_frame.f_globals
  caller_locals = caller_frame.f_locals
  context = {}
  context.update(caller_globals)
  context.update(caller_locals)

  component_trace = _Fire(component, args, context, name)

  if component_trace.HasError():
    for help_flag in ['-h', '--help']:
      if help_flag in component_trace.elements[-1].args:
        command = '{cmd} -- --help'.format(cmd=component_trace.GetCommand())
        print(('WARNING: The proper way to show help is {cmd}.\n'
               'Showing help anyway.\n').format(cmd=pipes.quote(command)),
              file=sys.stderr)

    print('Fire trace:\n{trace}\n'.format(trace=component_trace),
          file=sys.stderr)
    result = component_trace.GetResult()
    print(
        helputils.HelpString(result, component_trace, component_trace.verbose),
        file=sys.stderr)
    raise FireExit(2, component_trace)
  elif component_trace.show_trace and component_trace.show_help:
    print('Fire trace:\n{trace}\n'.format(trace=component_trace),
          file=sys.stderr)
    result = component_trace.GetResult()
    print(
        helputils.HelpString(result, component_trace, component_trace.verbose),
        file=sys.stderr)
    raise FireExit(0, component_trace)
  elif component_trace.show_trace:
    print('Fire trace:\n{trace}'.format(trace=component_trace),
          file=sys.stderr)
    raise FireExit(0, component_trace)
  elif component_trace.show_help:
    result = component_trace.GetResult()
    print(
        helputils.HelpString(result, component_trace, component_trace.verbose),
        file=sys.stderr)
    raise FireExit(0, component_trace)
  else:
    _PrintResult(component_trace, verbose=component_trace.verbose)
    result = component_trace.GetResult()
    return result
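
# --- Illustrative sketch (an editorial addition, not original Fire code) ---
# Demonstrates the `command` argument documented above. `_example_greet` is a
# hypothetical function defined only for this sketch. Given the Fire()
# signature above, a string command is shlex-split while a list is used
# as-is, so the two calls in the trailing comments should be equivalent.
def _example_greet(name='world'):
  """Hypothetical example target for the sketch above; not part of Fire."""
  return 'Hello %s' % name

# Fire(_example_greet, command=['--name', 'Fire'])  # -> 'Hello Fire'
# Fire(_example_greet, command='--name=Fire')       # -> 'Hello Fire'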
def CompletionScript(name, component):
  """Returns the text of the Bash completion script for a Fire CLI."""
  return completion.Script(name, component)


class FireError(Exception):
  """Exception used by Fire when a Fire command cannot be executed.

  These exceptions are not raised by the Fire function, but rather are caught
  and added to the FireTrace.
  """


class FireExit(SystemExit):
  """An exception raised by Fire to the client in the case of a FireError.

  The trace of the Fire program is available on the `trace` property.

  This exception inherits from SystemExit, so clients may explicitly catch it
  with `except SystemExit` or `except FireExit`. If not caught, this exception
  will cause the client program to exit without a stacktrace.
  """

  def __init__(self, code, component_trace):
    """Constructs a FireExit exception.

    Args:
      code: (int) Exit code for the Fire CLI.
      component_trace: (FireTrace) The trace for the Fire command.
    """
    super(FireExit, self).__init__(code)
    self.trace = component_trace
def _PrintResult(component_trace, verbose=False):
  """Prints the result of the Fire call to stdout in a human readable way."""
  # TODO: Design human readable deserializable serialization method and move
  # serialization to its own module.
  result = component_trace.GetResult()

  if isinstance(result, (list, set, types.GeneratorType)):
    for i in result:
      print(_OneLineResult(i))
  elif inspect.isgeneratorfunction(result):
    raise NotImplementedError
  elif isinstance(result, dict):
    print(_DictAsString(result, verbose))
  elif isinstance(result, tuple):
    print(_OneLineResult(result))
  elif isinstance(result, (bool, six.string_types, six.integer_types, float,
                           complex)):
    print(result)
  elif result is not None:
    print(helputils.HelpString(result, component_trace, verbose))


def _DictAsString(result, verbose=False):
  """Returns a dict as a string.

  Args:
    result: The dict to convert to a string
    verbose: Whether to include 'hidden' members, those keys starting with _.
  Returns:
    A string representing the dict
  """
  result = {key: value for key, value in result.items()
            if _ComponentVisible(key, verbose)}

  if not result:
    return '{}'

  longest_key = max(len(str(key)) for key in result.keys())
  format_string = '{{key:{padding}s}} {{value}}'.format(padding=longest_key + 1)

  lines = []
  for key, value in result.items():
    line = format_string.format(key=str(key) + ':',
                                value=_OneLineResult(value))
    lines.append(line)
  return '\n'.join(lines)
def _ComponentVisible(component, verbose=False):
  """Returns whether a component should be visible in the output."""
  return (
      verbose
      or not isinstance(component, six.string_types)
      or not component.startswith('_'))


def _OneLineResult(result):
  """Returns result serialized to a single line string."""
  # TODO: Ensure line is fewer than eg 120 characters.
  if isinstance(result, six.string_types):
    return str(result).replace('\n', ' ')

  try:
    # Don't force conversion to ascii.
    return json.dumps(result, ensure_ascii=False)
  except (TypeError, ValueError):
    return str(result).replace('\n', ' ')
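
# --- Illustrative sketch (an editorial addition, not original Fire code) ---
# Expected behavior of the result helpers above, assuming the implementations
# shown: multi-line strings are flattened, JSON-serializable values go
# through json.dumps, and anything json can't handle falls back to a
# one-line str():
#   _OneLineResult('a\nb')    -> 'a b'
#   _OneLineResult({'a': 1})  -> '{"a": 1}'
#   _OneLineResult(object())  -> '<object object at 0x...>' (single line)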
def _Fire(component, args, context, name=None):
  """Execute a Fire command on a target component using the args supplied.

  Arguments that come after a final isolated '--' are treated as Flags, eg
  for interactive mode or completion script generation. Other arguments are
  consumed by the execution of the Fire command, eg in the traversal of the
  members of the component, or in calling a function or instantiating a class
  found during the traversal.

  The steps performed by this method are:

  1. Parse any Flag args (the args after the final --)
  2. Start with component as the current component.
  2a. If the current component is a class, instantiate it using args from
      args.
  2b. If the current component is a routine, call it using args from args.
  2c. Otherwise access a member from component using an arg from args.
  2d. Repeat 2a-2c until no args remain.
  3a. Embed into ipython REPL if interactive mode is selected.
  3b. Generate a completion script if that flag is provided.

  In step 2, arguments will only ever be consumed up to a separator; a single
  step will never consume arguments from both sides of a separator. The
  separator defaults to a hyphen (-), and can be overwritten with the
  --separator Fire argument.

  Args:
    component: The target component for Fire.
    args: A list of args to consume in Firing on the component, usually from
        the command line.
    context: A dict with the local and global variables available at the call
        to Fire.
    name: Optional. The name of the command. Used in interactive mode and in
        the tab completion script.
  Returns:
    FireTrace of components starting with component, tracing Fire's execution
        path as it consumes args.
  Raises:
    ValueError: If there are arguments that cannot be consumed.
    ValueError: If --completion is specified but no name available.
  """
  args, flag_args = parser.SeparateFlagArgs(args)

  argparser = parser.CreateParser()
  parsed_flag_args, unused_args = argparser.parse_known_args(flag_args)
  verbose = parsed_flag_args.verbose
  interactive = parsed_flag_args.interactive
  separator = parsed_flag_args.separator
  show_completion = parsed_flag_args.completion
  show_help = parsed_flag_args.help
  show_trace = parsed_flag_args.trace

  # component can be a module, class, routine, object, etc.
  if component is None:
    component = context

  initial_component = component
  component_trace = trace.FireTrace(
      initial_component=initial_component, name=name, separator=separator,
      verbose=verbose, show_help=show_help, show_trace=show_trace)

  instance = None
  remaining_args = args
  while True:
    last_component = component
    initial_args = remaining_args

    if not remaining_args and (show_help or interactive or show_trace
                               or show_completion):
      # Don't initialize the final class or call the final function unless
      # there's a separator after it, and instead process the current
      # component.
      break

    saved_args = []
    used_separator = False
    if separator in remaining_args:
      # For the current component, only use arguments up to the separator.
      separator_index = remaining_args.index(separator)
      saved_args = remaining_args[separator_index + 1:]
      remaining_args = remaining_args[:separator_index]
      used_separator = True
    assert separator not in remaining_args

    if inspect.isclass(component) or inspect.isroutine(component):
      # The component is a class or a routine; we'll try to initialize it or
      # call it.
      isclass = inspect.isclass(component)

      try:
        target = component.__name__
        filename, lineno = inspectutils.GetFileAndLine(component)

        component, consumed_args, remaining_args, capacity = _CallCallable(
            component, remaining_args)

        # Update the trace.
        if isclass:
          component_trace.AddInstantiatedClass(
              component, target, consumed_args, filename, lineno, capacity)
        else:
          component_trace.AddCalledRoutine(
              component, target, consumed_args, filename, lineno, capacity)
      except FireError as error:
        component_trace.AddError(error, initial_args)
        return component_trace

      if last_component is initial_component:
        # If the initial component is a class, keep an instance for use with
        # -i.
        instance = component

    elif isinstance(component, (list, tuple)) and remaining_args:
      # The component is a tuple or list; we'll try to access a member.
      arg = remaining_args[0]
      try:
        index = int(arg)
        component = component[index]
      except (ValueError, IndexError):
        error = FireError(
            'Unable to index into component with argument:', arg)
        component_trace.AddError(error, initial_args)
        return component_trace

      remaining_args = remaining_args[1:]
      filename = None
      lineno = None
      component_trace.AddAccessedProperty(
          component, index, [arg], filename, lineno)
    elif isinstance(component, dict) and remaining_args:
      # The component is a dict; we'll try to access a member.
      target = remaining_args[0]
      if target in component:
        component = component[target]
      elif target.replace('-', '_') in component:
        component = component[target.replace('-', '_')]
      else:
        # The target isn't present in the dict as a string, but maybe it is
        # as another type.
        # TODO: Consider alternatives for accessing non-string keys.
        found_target = False
        for key, value in component.items():
          if target == str(key):
            component = value
            found_target = True
            break
        if not found_target:
          error = FireError(
              'Cannot find target in dict:', target, component)
          component_trace.AddError(error, initial_args)
          return component_trace

      remaining_args = remaining_args[1:]
      filename = None
      lineno = None
      component_trace.AddAccessedProperty(
          component, target, [target], filename, lineno)

    elif remaining_args:
      # We'll try to access a member of the component.
      try:
        target = remaining_args[0]

        component, consumed_args, remaining_args = _GetMember(
            component, remaining_args)

        filename, lineno = inspectutils.GetFileAndLine(component)

        component_trace.AddAccessedProperty(
            component, target, consumed_args, filename, lineno)
      except FireError as error:
        component_trace.AddError(error, initial_args)
        return component_trace

    if used_separator:
      # Add back in the arguments from after the separator.
      if remaining_args:
        remaining_args = remaining_args + [separator] + saved_args
      elif (inspect.isclass(last_component)
            or inspect.isroutine(last_component)):
        remaining_args = saved_args
        component_trace.AddSeparator()
      elif component is not last_component:
        remaining_args = [separator] + saved_args
      else:
        # It was an unnecessary separator.
        remaining_args = saved_args

    if component is last_component and remaining_args == initial_args:
      # We're making no progress.
      break

  if remaining_args:
    component_trace.AddError(
        FireError('Could not consume arguments:', remaining_args),
        initial_args)
    return component_trace

  if show_completion:
    if name is None:
      raise ValueError('Cannot make completion script without command name')
    script = CompletionScript(name, initial_component)
    component_trace.AddCompletionScript(script)

  if interactive:
    variables = context.copy()

    if name is not None:
      variables[name] = initial_component
    variables['component'] = initial_component
    variables['result'] = component
    variables['trace'] = component_trace

    if instance is not None:
      variables['self'] = instance

    interact.Embed(variables, verbose)

  return component_trace
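
# --- Illustrative sketch (an editorial addition, not original Fire code) ---
# Walks through the traversal loop above with a dict component. The names are
# hypothetical. With args ['double', '4'], the dict branch consumes 'double'
# to select the member, then the routine branch calls it with the rest:
def _example_components():
  """Hypothetical dict component for the sketch above; not part of Fire."""
  return {'double': lambda x: 2 * x, 'triple': lambda x: 3 * x}

# Fire(_example_components(), command=['double', '4'])  # -> 8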
def _GetMember(component, args):
  """Returns a subcomponent of component by consuming an argument.

  Given a starting component and args, this function gets a member from that
  component, consuming one arg in the process.

  Args:
    component: The component from which to get a member.
    args: Args from which to consume in the search for the next component.
  Returns:
    component: The component that was found by consuming an arg.
    consumed_args: The args that were consumed by getting this member.
    remaining_args: The remaining args that haven't been consumed yet.
  Raises:
    FireError: If we cannot consume an argument to get a member.
  """
  members = dict(inspect.getmembers(component))
  arg = args[0]
  arg_names = [
      arg,
      arg.replace('-', '_'),  # treat '-' as '_'.
  ]

  for arg_name in arg_names:
    if arg_name in members:
      return members[arg_name], [arg], args[1:]

  raise FireError('Could not consume arg:', arg)
def _CallCallable(fn, args):
  """Calls the function fn by consuming args from args.

  Args:
    fn: The function to call or class to instantiate.
    args: Args from which to consume for calling the function.
  Returns:
    component: The object that is the result of the function call.
    consumed_args: The args that were consumed for the function call.
    remaining_args: The remaining args that haven't been consumed yet.
    capacity: Whether the call could have taken args in place of defaults.
  """
  parse = _MakeParseFn(fn)
  (varargs, kwargs), consumed_args, remaining_args, capacity = parse(args)
  result = fn(*varargs, **kwargs)
  return result, consumed_args, remaining_args, capacity
def _MakeParseFn(fn):
  """Creates a parse function for fn.

  Args:
    fn: The function or class to create the parse function for.
  Returns:
    A parse function for fn. The parse function accepts a list of arguments
    and returns (varargs, kwargs), remaining_args. The original function fn
    can then be called with fn(*varargs, **kwargs). The remaining_args are
    the leftover args from the arguments to the parse function.
  """
  fn_spec = inspectutils.GetFullArgSpec(fn)
  all_args = fn_spec.args + fn_spec.kwonlyargs
  metadata = decorators.GetMetadata(fn)

  # Note: num_required_args is the number of positional arguments without
  # default values. All of these arguments are required.
  num_required_args = len(fn_spec.args) - len(fn_spec.defaults)
  required_kwonly = set(fn_spec.kwonlyargs) - set(fn_spec.kwonlydefaults)

  def _ParseFn(args):
    """Parses the list of `args` into (varargs, kwargs), remaining_args."""
    kwargs, remaining_kwargs, remaining_args = _ParseKeywordArgs(
        args, all_args, fn_spec.varkw)

    # Note: _ParseArgs modifies kwargs.
    parsed_args, kwargs, remaining_args, capacity = _ParseArgs(
        fn_spec.args, fn_spec.defaults, num_required_args, kwargs,
        remaining_args, metadata)

    if fn_spec.varargs or fn_spec.varkw:
      # If we're allowed *varargs or **kwargs, there's always capacity.
      capacity = True

    extra_kw = set(kwargs) - set(fn_spec.kwonlyargs)
    if fn_spec.varkw is None and extra_kw:
      raise FireError('Unexpected kwargs present:', extra_kw)

    missing_kwonly = set(required_kwonly) - set(kwargs)
    if missing_kwonly:
      raise FireError('Missing required flags:', missing_kwonly)

    # If we accept *varargs, then use all remaining arguments for *varargs.
    if fn_spec.varargs is not None:
      varargs, remaining_args = remaining_args, []
    else:
      varargs = []

    for index, value in enumerate(varargs):
      varargs[index] = _ParseValue(value, None, None, metadata)

    varargs = parsed_args + varargs
    remaining_args += remaining_kwargs

    consumed_args = args[:len(args) - len(remaining_args)]
    return (varargs, kwargs), consumed_args, remaining_args, capacity

  return _ParseFn
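
# --- Illustrative sketch (an editorial addition, not original Fire code) ---
# What the parse function built above returns, roughly, for a hypothetical
# `def _example_pow(base, exp=2)` and args ['3']:
#   (varargs, kwargs) == ([3, 2], {})  # '3' parsed to 3; default 2 filled in
#   consumed_args == ['3'], remaining_args == [], capacity == True
# capacity is True because the default for `exp` could still have been
# replaced by a further argument.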
def _ParseArgs(fn_args, fn_defaults, num_required_args, kwargs,
               remaining_args, metadata):
  """Parses the positional and named arguments from the available args.

  Args:
    fn_args: A list of argument names that the target function accepts,
        including positional and named arguments, but not the varargs or
        kwargs names.
    fn_defaults: A list of the default values in the function argspec.
    num_required_args: The number of required arguments from the function's
        argspec. This is the number of arguments without a default value.
    kwargs: Dict with named command line arguments and their values.
    remaining_args: The remaining command line arguments, which may still be
        used as positional arguments.
    metadata: Metadata about the function, typically from Fire decorators.
  Returns:
    parsed_args: A list of values to be used as positional arguments for
        calling the target function.
    kwargs: The input dict kwargs modified with the used kwargs removed.
    remaining_args: A list of the supplied args that have not been used yet.
    capacity: Whether the call could have taken args in place of defaults.
  Raises:
    FireError: If additional positional arguments are expected, but none are
        available.
  """
  accepts_positional_args = metadata.get(decorators.ACCEPTS_POSITIONAL_ARGS)
  capacity = False  # If we see a default get used, we'll set capacity to True

  # Select unnamed args.
  parsed_args = []
  for index, arg in enumerate(fn_args):
    value = kwargs.pop(arg, None)
    if value is not None:  # A value is specified at the command line.
      value = _ParseValue(value, index, arg, metadata)
      parsed_args.append(value)
    else:  # No value has been explicitly specified.
      if remaining_args and accepts_positional_args:
        # Use a positional arg.
        value = remaining_args.pop(0)
        value = _ParseValue(value, index, arg, metadata)
        parsed_args.append(value)
      elif index < num_required_args:
        raise FireError(
            'The function received no value for the required argument:', arg)
      else:
        # We're past the args for which there's no default value.
        # There's a default value for this arg.
        capacity = True
        default_index = index - num_required_args  # index into the defaults.
        parsed_args.append(fn_defaults[default_index])

  for key, value in kwargs.items():
    kwargs[key] = _ParseValue(value, None, key, metadata)

  return parsed_args, kwargs, remaining_args, capacity
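
# --- Illustrative sketch (an editorial addition, not original Fire code) ---
# The error path described above: for a hypothetical `def _example_div(a, b)`
# supplied with only one argument, index 1 ('b') has no command line value
# and no default, so index < num_required_args and _ParseArgs raises
#   FireError('The function received no value for the required argument:',
#             'b')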
# See the License for the specific language", "but rather are caught and added to the FireTrace. \"\"\"", "usage information for the command. -i --interactive: Drop into a", "- set(kwargs) if missing_kwonly: raise FireError('Missing required flags:', missing_kwonly) #", "consuming args from args. Args: fn: The function to call", "index, arg in enumerate(fn_args): value = kwargs.pop(arg, None) if value", "raise FireExit(0, component_trace) elif component_trace.show_help: result = component_trace.GetResult() print( helputils.HelpString(result,", "a single step will never consume arguments from both sides", "not the varargs or kwargs names. fn_defaults: A list of", "\"\"\"Parses the supplied arguments for keyword arguments. Given a list", "and accepts_positional_args: # Use a positional arg. value = remaining_args.pop(0)", "fn_args: value = 'True' elif keyword.startswith('no'): keyword = keyword[2:] value", "a string verbose: Whether to include 'hidden' members, those keys", "kwargs), remaining_args. The original function fn can then be called", "be called with fn(*varargs, **kwargs). The remaining_args are the leftover", "in Firing on the component, usually from the command line.", "all remaining arguments for *varargs. if fn_spec.varargs is not None:", "there are arguments that cannot be consumed. ValueError: If --completion", "= parse(args) result = fn(*varargs, **kwargs) return result, consumed_args, remaining_args,", "if index + 1 < len(args): value = args[index +", "if parse_fns: default = parse_fns['default'] positional = parse_fns['positional'] named =", "updated by using the command arguments to either access a", "Dict with named command line arguments and their values. remaining_args:", "arg1 arg2\" might access the \"fn\" property of the initial", "interactive = parsed_flag_args.interactive separator = parsed_flag_args.separator show_completion = parsed_flag_args.completion show_help", "consumed up to a separator; a single step will never", "component_trace, component_trace.verbose), file=sys.stderr) raise FireExit(2, component_trace) elif component_trace.show_trace and component_trace.show_help:", "trace flags, Fire will raise a FireExit with code 0", "all arguments are consumed and there's no function left to", "a routine; we'll try to initialize it or # call", "- set(fn_spec.kwonlyargs) if fn_spec.varkw is None and extra_kw: raise FireError('Unexpected", "arguments to the parse function. \"\"\" fn_spec = inspectutils.GetFullArgSpec(fn) all_args", "metadata): \"\"\"Parses the positional and named arguments from the available", "if component is None: component = context initial_component = component", "a named argument; get its value from this arg or", "= not contains_equals and not is_bool_syntax arg_consumed = True if", "remaining_args[:separator_index] used_separator = True assert separator not in remaining_args if", "= False for index, argument in enumerate(args): if skip_argument: skip_argument", "the appropriate type. Args: args: A list of arguments fn_args:", "\"\"\"Prints the result of the Fire call to stdout in", "component_trace.AddCompletionScript(script) if interactive: variables = context.copy() if name is not", "0 if successful. \"\"\" name = name or os.path.basename(sys.argv[0]) #", "from Fire decorators. Returns: parsed_args: A list of values to", "could have taken args in place of defaults. Raises: FireError:", "this arg or the next. got_argument = False keyword =", "when a Fire command cannot be executed. These exceptions are", "from the original args. 
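# Example (illustrative sketch, not part of this module): a minimal Fire CLI.
# Assuming a file `calculator.py` containing:
#
#   import fire
#
#   class Calculator(object):
#
#     def add(self, x, y):
#       return x + y
#
#   if __name__ == '__main__':
#     fire.Fire(Calculator)
#
# then `python calculator.py add 1 2` prints 3, and
# `python calculator.py -- --help` prints usage information.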

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import inspect
import json
import os
import pipes
import shlex
import sys
import types

from fire import completion
from fire import decorators
from fire import helputils
from fire import inspectutils
from fire import interact
from fire import parser
from fire import trace

import six


def Fire(component=None, command=None, name=None):
  """This function, Fire, is the main entrypoint for Python Fire.

  Executes a command either from the `command` argument or from sys.argv by
  recursively traversing the target object `component`'s members consuming
  arguments, evaluating functions, and instantiating classes as it goes.

  When building a CLI with Fire, your main method should call this function.

  Args:
    component: The initial target component.
    command: Optional. If supplied, this is the command executed. If not
        supplied, then the command is taken from sys.argv instead. This can
        be a string or a list of strings; a list of strings is preferred.
    name: Optional. The name of the command as entered at the command line.
        Used in interactive mode and for generating the completion script.
  Returns:
    The result of executing the Fire command. Execution begins with the
    initial target component. The component is updated by using the command
    arguments to either access a member of the current component, call the
    current component (if it's a function), or instantiate the current
    component (if it's a class). When all arguments are consumed and there's
    no function left to call or class left to instantiate, the resulting
    current component is the final result.
  Raises:
    ValueError: If the command argument is supplied, but not a string or a
        sequence of arguments.
    FireExit: When Fire encounters a FireError, Fire will raise a FireExit
        with code 2. When used with the help or trace flags, Fire will raise
        a FireExit with code 0 if successful.
  """
  name = name or os.path.basename(sys.argv[0])

  # Get args as a list.
  if isinstance(command, six.string_types):
    args = shlex.split(command)
  elif isinstance(command, (list, tuple)):
    args = command
  elif command is None:
    # Use the command line args by default if no command is specified.
    args = sys.argv[1:]
  else:
    raise ValueError('The command argument must be a string or a sequence of '
                     'arguments.')

  # Determine the calling context.
  caller = inspect.stack()[1]
  caller_frame = caller[0]
  caller_globals = caller_frame.f_globals
  caller_locals = caller_frame.f_locals
  context = {}
  context.update(caller_globals)
  context.update(caller_locals)

  component_trace = _Fire(component, args, context, name)

  if component_trace.HasError():
    for help_flag in ['-h', '--help']:
      if help_flag in component_trace.elements[-1].args:
        command = '{cmd} -- --help'.format(cmd=component_trace.GetCommand())
        print(('WARNING: The proper way to show help is {cmd}.\n'
               'Showing help anyway.\n').format(cmd=pipes.quote(command)),
              file=sys.stderr)

    print('Fire trace:\n{trace}\n'.format(trace=component_trace),
          file=sys.stderr)
    result = component_trace.GetResult()
    print(
        helputils.HelpString(result, component_trace, component_trace.verbose),
        file=sys.stderr)
    raise FireExit(2, component_trace)
  elif component_trace.show_trace and component_trace.show_help:
    print('Fire trace:\n{trace}\n'.format(trace=component_trace),
          file=sys.stderr)
    result = component_trace.GetResult()
    print(
        helputils.HelpString(result, component_trace, component_trace.verbose),
        file=sys.stderr)
    raise FireExit(0, component_trace)
  elif component_trace.show_trace:
    print('Fire trace:\n{trace}'.format(trace=component_trace),
          file=sys.stderr)
    raise FireExit(0, component_trace)
  elif component_trace.show_help:
    result = component_trace.GetResult()
    print(
        helputils.HelpString(result, component_trace, component_trace.verbose),
        file=sys.stderr)
    raise FireExit(0, component_trace)
  else:
    _PrintResult(component_trace, verbose=component_trace.verbose)
    result = component_trace.GetResult()
    return result
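# Example (illustrative sketch, not part of this module): Fire can also be
# driven programmatically through the `command` argument, e.g. in tests:
#
#   import fire
#
#   def double(x):
#     return 2 * x
#
#   result = fire.Fire(double, command=['8'])  # result == 16
#
# The string '8' is parsed to the int 8 before the call. Note that the help
# and trace flags make Fire raise FireExit (a SystemExit subclass), so
# callers passing such flags should be prepared to catch it.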

def CompletionScript(name, component):
  """Returns the text of the Bash completion script for a Fire CLI."""
  return completion.Script(name, component)


class FireError(Exception):
  """Exception used by Fire when a Fire command cannot be executed.

  These exceptions are not raised by the Fire function, but rather are caught
  and added to the FireTrace.
  """


class FireExit(SystemExit):
  """An exception raised by Fire to the client in the case of a FireError.

  The trace of the Fire program is available on the `trace` property.

  This exception inherits from SystemExit, so clients may explicitly catch it
  with `except SystemExit` or `except FireExit`. If not caught, this
  exception will cause the client program to exit without a stacktrace.
  """

  def __init__(self, code, component_trace):
    """Constructs a FireExit exception.

    Args:
      code: (int) Exit code for the Fire CLI.
      component_trace: (FireTrace) The trace for the Fire command.
    """
    super(FireExit, self).__init__(code)
    self.trace = component_trace
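# Example (illustrative sketch): because FireExit subclasses SystemExit, a
# caller can inspect the trace instead of exiting. Here `some_component` is
# a hypothetical stand-in:
#
#   import fire
#   from fire import core
#
#   try:
#     fire.Fire(some_component, command=['--', '--help'])
#   except core.FireExit as exit_exc:
#     print(exit_exc.code)   # 0 for help/trace, 2 for errors
#     print(exit_exc.trace)  # the FireTrace for the command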

def _PrintResult(component_trace, verbose=False):
  """Prints the result of the Fire call to stdout in a human readable way."""
  # TODO: Design human readable deserializable serialization method and move
  # serialization to its own module.
  result = component_trace.GetResult()

  if isinstance(result, (list, set, types.GeneratorType)):
    for i in result:
      print(_OneLineResult(i))
  elif inspect.isgeneratorfunction(result):
    raise NotImplementedError
  elif isinstance(result, dict):
    print(_DictAsString(result, verbose))
  elif isinstance(result, tuple):
    print(_OneLineResult(result))
  elif isinstance(result,
                  (bool, six.string_types, six.integer_types, float, complex)):
    print(result)
  elif result is not None:
    print(helputils.HelpString(result, component_trace, verbose))


def _DictAsString(result, verbose=False):
  """Returns a dict as a string.

  Args:
    result: The dict to convert to a string
    verbose: Whether to include 'hidden' members, those keys starting with _.
  Returns:
    A string representing the dict
  """
  result = {key: value for key, value in result.items()
            if _ComponentVisible(key, verbose)}

  if not result:
    return '{}'

  longest_key = max(len(str(key)) for key in result.keys())
  format_string = '{{key:{padding}s}} {{value}}'.format(padding=longest_key + 1)

  lines = []
  for key, value in result.items():
    line = format_string.format(key=str(key) + ':',
                                value=_OneLineResult(value))
    lines.append(line)
  return '\n'.join(lines)


def _ComponentVisible(component, verbose=False):
  """Returns whether a component should be visible in the output."""
  return (
      verbose
      or not isinstance(component, six.string_types)
      or not component.startswith('_'))


def _OneLineResult(result):
  """Returns result serialized to a single line string."""
  # TODO: Ensure line is fewer than eg 120 characters.
  if isinstance(result, six.string_types):
    return str(result).replace('\n', ' ')

  try:
    # Don't force conversion to ascii.
    return json.dumps(result, ensure_ascii=False)
  except (TypeError, ValueError):
    return str(result).replace('\n', ' ')

def _Fire(component, args, context, name=None):
  """Execute a Fire command on a target component using the args supplied.

  Arguments that come after a final isolated '--' are treated as Flags, eg
  for interactive mode or completion script generation.

  Other arguments are consumed by the execution of the Fire command, eg in
  the traversal of the members of the component, or in calling a function or
  instantiating a class found during the traversal.

  The steps performed by this method are:

  1. Parse any Flag args (the args after the final --)

  2. Start with component as the current component.
  2a. If the current component is a class, instantiate it using args from
      args.
  2b. If the current component is a routine, call it using args from args.
  2c. Otherwise access a member from component using an arg from args.
  2d. Repeat 2a-2c until no args remain.

  3a. Embed into ipython REPL if interactive mode is selected.
  3b. Generate a completion script if that flag is provided.

  In step 2, arguments will only ever be consumed up to a separator; a single
  step will never consume arguments from both sides of a separator. The
  separator defaults to a hyphen (-), and can be overwritten with the
  --separator Fire argument.

  Args:
    component: The target component for Fire.
    args: A list of args to consume in Firing on the component, usually from
        the command line.
    context: A dict with the local and global variables available at the
        call to Fire.
    name: Optional. The name of the command. Used in interactive mode and in
        the tab completion script.
  Returns:
    FireTrace of components starting with component, tracing Fire's execution
        path as it consumes args.
  Raises:
    ValueError: If there are arguments that cannot be consumed.
    ValueError: If --completion is specified but no name available.
  """
  args, flag_args = parser.SeparateFlagArgs(args)

  argparser = parser.CreateParser()
  parsed_flag_args, unused_args = argparser.parse_known_args(flag_args)
  verbose = parsed_flag_args.verbose
  interactive = parsed_flag_args.interactive
  separator = parsed_flag_args.separator
  show_completion = parsed_flag_args.completion
  show_help = parsed_flag_args.help
  show_trace = parsed_flag_args.trace

  # component can be a module, class, routine, object, etc.
  if component is None:
    component = context

  initial_component = component
  component_trace = trace.FireTrace(
      initial_component=initial_component, name=name, separator=separator,
      verbose=verbose, show_help=show_help, show_trace=show_trace)

  instance = None
  remaining_args = args
  while True:
    last_component = component
    initial_args = remaining_args

    if not remaining_args and (show_help or interactive or show_trace
                               or show_completion):
      # Don't initialize the final class or call the final function unless
      # there's a separator after it, and instead process the current
      # component.
      break

    saved_args = []
    used_separator = False
    if separator in remaining_args:
      # For the current component, only use args up to the separator.
      separator_index = remaining_args.index(separator)
      saved_args = remaining_args[separator_index + 1:]
      remaining_args = remaining_args[:separator_index]
      used_separator = True
    assert separator not in remaining_args

    if inspect.isclass(component) or inspect.isroutine(component):
      # The component is a class or a routine; we'll try to initialize it or
      # call it.
      isclass = inspect.isclass(component)

      try:
        target = component.__name__
        filename, lineno = inspectutils.GetFileAndLine(component)

        component, consumed_args, remaining_args, capacity = _CallCallable(
            component, remaining_args)

        # Update the trace.
        if isclass:
          component_trace.AddInstantiatedClass(
              component, target, consumed_args, filename, lineno, capacity)
        else:
          component_trace.AddCalledRoutine(
              component, target, consumed_args, filename, lineno, capacity)
      except FireError as error:
        component_trace.AddError(error, initial_args)
        return component_trace

      if last_component is initial_component:
        # If the initial component is a class, keep an instance for use with
        # -i.
        instance = component

    elif isinstance(component, (list, tuple)) and remaining_args:
      # The component is a tuple or list; we'll try to access a member.
      arg = remaining_args[0]
      try:
        index = int(arg)
        component = component[index]
      except (ValueError, IndexError):
        error = FireError(
            'Unable to index into component with argument:', arg)
        component_trace.AddError(error, initial_args)
        return component_trace

      remaining_args = remaining_args[1:]
      filename = None
      lineno = None
      component_trace.AddAccessedProperty(
          component, index, [arg], filename, lineno)

    elif isinstance(component, dict) and remaining_args:
      # The component is a dict; we'll try to access a member.
      target = remaining_args[0]
      if target in component:
        component = component[target]
      elif target.replace('-', '_') in component:
        component = component[target.replace('-', '_')]
      else:
        # The target isn't present in the dict as a string, but maybe it is
        # as another type.
        # TODO: Consider alternatives for accessing non-string keys.
        found_target = False
        for key, value in component.items():
          if target == str(key):
            component = value
            found_target = True
            break
        if not found_target:
          error = FireError(
              'Cannot find target in dict:', target, component)
          component_trace.AddError(error, initial_args)
          return component_trace

      remaining_args = remaining_args[1:]
      filename = None
      lineno = None
      component_trace.AddAccessedProperty(
          component, target, [target], filename, lineno)

    elif remaining_args:
      # We'll try to access a member of the component.
      try:
        target = remaining_args[0]

        component, consumed_args, remaining_args = _GetMember(
            component, remaining_args)

        filename, lineno = inspectutils.GetFileAndLine(component)

        component_trace.AddAccessedProperty(
            component, target, consumed_args, filename, lineno)

      except FireError as error:
        component_trace.AddError(error, initial_args)
        return component_trace

    if used_separator:
      # Add back in the arguments from after the separator.
      if remaining_args:
        remaining_args = remaining_args + [separator] + saved_args
      elif (inspect.isclass(last_component)
            or inspect.isroutine(last_component)):
        remaining_args = saved_args
        component_trace.AddSeparator()
      elif component is not last_component:
        remaining_args = [separator] + saved_args
      else:
        # It was an unnecessary separator.
        remaining_args = saved_args

    if component is last_component and remaining_args == initial_args:
      # We're making no progress.
      break

  if remaining_args:
    component_trace.AddError(
        FireError('Could not consume arguments:', remaining_args),
        initial_args)
    return component_trace

  if show_completion:
    if name is None:
      raise ValueError('Cannot make completion script without command name')
    script = CompletionScript(name, initial_component)
    component_trace.AddCompletionScript(script)

  if interactive:
    variables = context.copy()

    if name is not None:
      variables[name] = initial_component
    variables['component'] = initial_component
    variables['result'] = component
    variables['trace'] = component_trace

    if instance is not None:
      variables['self'] = instance

    interact.Embed(variables, verbose)

    component_trace.AddInteractiveMode()

  return component_trace
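# Example (illustrative sketch): the separator limits how many arguments a
# single step may consume. With the default separator '-', a command such as
#
#   tool fn arg1 arg2 - member
#
# calls fn with only arg1 and arg2, then continues the traversal on fn's
# result by accessing its 'member', instead of passing all four strings to
# fn.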

def _GetMember(component, args):
  """Returns a subcomponent of component by consuming an arg from args.

  Given a starting component and args, this function gets a member from that
  component, consuming one arg in the process.

  Args:
    component: The component from which to get a member.
    args: Args from which to consume in the search for the next component.
  Returns:
    component: The component that was found by consuming an arg.
    consumed_args: The args that were consumed by getting this member.
    remaining_args: The remaining args that haven't been consumed yet.
  Raises:
    FireError: If we cannot consume an argument to get a member.
  """
  members = dict(inspect.getmembers(component))
  arg = args[0]
  arg_names = [
      arg,
      arg.replace('-', '_'),  # treat '-' as '_'.
  ]

  for arg_name in arg_names:
    if arg_name in members:
      return members[arg_name], [arg], args[1:]

  raise FireError('Could not consume arg:', arg)


def _CallCallable(fn, args):
  """Calls the function fn by consuming args from args.

  Args:
    fn: The function to call or class to instantiate.
    args: Args from which to consume for calling the function.
  Returns:
    component: The object that is the result of the function call.
    consumed_args: The args that were consumed for the function call.
    remaining_args: The remaining args that haven't been consumed yet.
    capacity: Whether the call could have taken additional args.
  """
  parse = _MakeParseFn(fn)
  (varargs, kwargs), consumed_args, remaining_args, capacity = parse(args)
  result = fn(*varargs, **kwargs)
  return result, consumed_args, remaining_args, capacity
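# Example (illustrative sketch): _GetMember lets a command line hyphen stand
# in for an underscore in member names, so for a component with a method
# total_price, both spellings resolve to the same member:
#
#   _GetMember(component, ['total_price', '10'])
#   _GetMember(component, ['total-price', '10'])
#
# Each returns (component.total_price, [<the arg>], ['10']).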

def _MakeParseFn(fn):
  """Creates a parse function for fn.

  Args:
    fn: The function or class to create the parse function for.
  Returns:
    A parse function for fn. The parse function accepts a list of arguments
    and returns (varargs, kwargs), remaining_args. The original function fn
    can then be called with fn(*varargs, **kwargs). The remaining_args are
    the leftover args from the arguments to the parse function.
  """
  fn_spec = inspectutils.GetFullArgSpec(fn)
  all_args = fn_spec.args + fn_spec.kwonlyargs
  metadata = decorators.GetMetadata(fn)

  # Note: num_required_args is the number of positional arguments without
  # default values. All of these arguments are required.
  num_required_args = len(fn_spec.args) - len(fn_spec.defaults)
  required_kwonly = set(fn_spec.kwonlyargs) - set(fn_spec.kwonlydefaults)

  def _ParseFn(args):
    """Parses the list of `args` into (varargs, kwargs), remaining_args."""
    kwargs, remaining_kwargs, remaining_args = _ParseKeywordArgs(
        args, all_args, fn_spec.varkw)

    # Note: _ParseArgs modifies kwargs.
    parsed_args, kwargs, remaining_args, capacity = _ParseArgs(
        fn_spec.args, fn_spec.defaults, num_required_args, kwargs,
        remaining_args, metadata)

    if fn_spec.varargs or fn_spec.varkw:
      # If we're allowed *varargs or **kwargs, there's always capacity.
      capacity = True

    extra_kw = set(kwargs) - set(fn_spec.kwonlyargs)
    if fn_spec.varkw is None and extra_kw:
      raise FireError('Unexpected kwargs present:', extra_kw)

    missing_kwonly = set(required_kwonly) - set(kwargs)
    if missing_kwonly:
      raise FireError('Missing required flags:', missing_kwonly)

    # If we accept *varargs, then use all remaining arguments for *varargs.
    if fn_spec.varargs is not None:
      varargs, remaining_args = remaining_args, []
    else:
      varargs = []

    for index, value in enumerate(varargs):
      varargs[index] = _ParseValue(value, None, None, metadata)

    varargs = parsed_args + varargs
    remaining_args += remaining_kwargs

    consumed_args = args[:len(args) - len(remaining_args)]
    return (varargs, kwargs), consumed_args, remaining_args, capacity

  return _ParseFn
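# Example (illustrative sketch): for a function such as
#
#   def fn(a, b=1, *rest):
#     ...
#
# the parse function built by _MakeParseFn would turn the command line args
# ['1', '--b', '2', '3', '4'] into varargs [1, 2, 3, 4] and kwargs {},
# consuming every argument, with capacity True because *rest is present.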

def _ParseArgs(fn_args, fn_defaults, num_required_args, kwargs,
               remaining_args, metadata):
  """Parses the positional and named arguments from the available supplied args.

  Modifies kwargs, removing args as they are used.

  Args:
    fn_args: A list of argument names that the target function accepts,
        including positional and named arguments, but not the varargs or
        kwargs names.
    fn_defaults: A list of the default values in the function argspec.
    num_required_args: The number of required arguments from the function's
        argspec. This is the number of arguments without a default value.
    kwargs: Dict with named command line arguments and their values.
    remaining_args: The remaining command line arguments, which may still be
        used as positional arguments.
    metadata: Metadata about the function, typically from Fire decorators.
  Returns:
    parsed_args: A list of values to be used as positional arguments for
        calling the target function.
    kwargs: The input dict kwargs modified with the used kwargs removed.
    remaining_args: A list of the supplied args that have not been used yet.
    capacity: Whether the call could have taken args in place of defaults.
  Raises:
    FireError: If additional positional arguments are expected, but none are
        available.
  """
  accepts_positional_args = metadata.get(decorators.ACCEPTS_POSITIONAL_ARGS)
  capacity = False  # If we see a default get used, we'll set capacity to True

  # Select unnamed args.
  parsed_args = []
  for index, arg in enumerate(fn_args):
    value = kwargs.pop(arg, None)
    if value is not None:  # A value is specified at the command line.
      value = _ParseValue(value, index, arg, metadata)
      parsed_args.append(value)
    else:  # No value has been explicitly specified.
      if remaining_args and accepts_positional_args:
        # Use a positional arg.
        value = remaining_args.pop(0)
        value = _ParseValue(value, index, arg, metadata)
        parsed_args.append(value)
      elif index < num_required_args:
        raise FireError(
            'The function received no value for the required argument:', arg)
      else:
        # We're past the args for which there's no default value.
        # There's a default value for this arg.
        capacity = True
        default_index = index - num_required_args  # index into the defaults.
        parsed_args.append(fn_defaults[default_index])

  for key, value in kwargs.items():
    kwargs[key] = _ParseValue(value, None, key, metadata)

  return parsed_args, kwargs, remaining_args, capacity
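# Example (illustrative sketch): for
#
#   def fn(a, b='default'):
#     ...
#
# parsing the args ['hello'] yields parsed_args ['hello', 'default'] with
# capacity True, since the default for b was used and another positional
# argument could still have been accepted in its place.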
They are later processed", "the arguments in the command in order to access a", "component begins as Component, and at each operation the component", "in named: parse_fn = named[arg] elif default is not None:", "here. parse_fns = metadata.get(decorators.FIRE_PARSE_FNS) if parse_fns: default = parse_fns['default'] positional", "args remain. 3a. Embed into ipython REPL if interactive mode", "= parsed_flag_args.trace # component can be a module, class, routine,", "target, component) component_trace.AddError(error, initial_args) return component_trace remaining_args = remaining_args[1:] filename", "is not last_component: remaining_args = [separator] + saved_args else: #", "fn_spec.args, fn_spec.defaults, num_required_args, kwargs, remaining_args, metadata) if fn_spec.varargs or fn_spec.varkw:", "Execution begins with the initial target component. The component is", "class). When all arguments are consumed and there's no function", "code for the Fire CLI. component_trace: (FireTrace) The trace for", "component from which to get a member. args: Args from", "if **kwargs not used Returns: kwargs: A dictionary mapping keywords", "generating the completion script. Returns: The result of executing the", "in result.items(): line = format_string.format(key=str(key) + ':', value=_OneLineResult(value)) lines.append(line) return", "+ 1 == len(args) or args[index + 1].startswith('--'))) if contains_equals:", "\"\"\"Returns result serialized to a single line string.\"\"\" # TODO:", "Fire function as your main method to create a CLI.", "lineno, capacity) except FireError as error: component_trace.AddError(error, initial_args) return component_trace", "Modifies kwargs, removing args as they are used. Args: fn_args:", "values. remaining_kwargs: A list of the unused kwargs from the", "object into a command line interface. Simply call the Fire", "for key, value in result.items() if _ComponentVisible(key, verbose)} if not", "FireExit(0, component_trace) elif component_trace.show_trace: print('Fire trace:\\n{trace}'.format(trace=component_trace), file=sys.stderr) raise FireExit(0, component_trace)", "component is a tuple or list; we'll try to access", "(ValueError, IndexError): error = FireError( 'Unable to index into component", "FireError, Fire will raise a FireExit with code 2. When", "interactive mode and for generating the completion script. Returns: The", "tuples, etc. They all work! Python Fire turns any Python", "kwargs: The input dict kwargs modified with the used kwargs", "extra_kw = set(kwargs) - set(fn_spec.kwonlyargs) if fn_spec.varkw is None and", "Inc. # # Licensed under the Apache License, Version 2.0", "= remaining_args.index(separator) saved_args = remaining_args[separator_index + 1:] remaining_args = remaining_args[:separator_index]", "_ParseArgs( fn_spec.args, fn_spec.defaults, num_required_args, kwargs, remaining_args, metadata) if fn_spec.varargs or", "the License. \"\"\"Python Fire is a library for creating CLIs", "named command line arguments and their values. remaining_args: The remaining", "Bash completion script for a Fire CLI.\"\"\" return completion.Script(name, component)", "\"\"\"Returns whether a component should be visible in the output.\"\"\"", "tracing Fire's execution path as it consumes args. Raises: ValueError:", "another type. 
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import inspect
import json
import os
import pipes
import shlex
import sys
import types

from fire import completion
from fire import decorators
from fire import helputils
from fire import inspectutils
from fire import interact
from fire import parser
from fire import trace
import six


def Fire(component=None, command=None, name=None):
  """This function, Fire, is the main entrypoint for Python Fire.

  Executes a command either from the `command` argument or from sys.argv by
  recursively traversing the target object `component`'s members consuming
  arguments, evaluating functions, and instantiating classes as it goes.

  When building a CLI with Fire, your main method should call this function.

  Args:
    component: The initial target component.
    command: Optional. If supplied, this is the command executed. If not
        supplied, then the command is taken from sys.argv instead. This can be
        a string or a list of strings; a list of strings is preferred.
    name: Optional. The name of the command as entered at the command line.
        Used in interactive mode and for generating the completion script.
  Returns:
    The result of executing the Fire command. Execution begins with the
    initial target component. The component is updated by using the command
    arguments to either access a member of the current component, call the
    current component (if it's a function), or instantiate the current
    component (if it's a class). When all arguments are consumed and there's
    no function left to call or class left to instantiate, the resulting
    current component is the final result.
  Raises:
    ValueError: If the command argument is supplied, but not a string or a
        sequence of arguments.
    FireExit: When Fire encounters a FireError, Fire will raise a FireExit
        with code 2. When used with the help or trace flags, Fire will raise
        a FireExit with code 0 if successful.
  """
  name = name or os.path.basename(sys.argv[0])

  # Get args as a list.
  if isinstance(command, six.string_types):
    args = shlex.split(command)
  elif isinstance(command, (list, tuple)):
    args = command
  elif command is None:
    # Use the command line args by default if no command is specified.
    args = sys.argv[1:]
  else:
    raise ValueError('The command argument must be a string or a sequence of '
                     'arguments.')

  # Determine the calling context.
  caller = inspect.stack()[1]
  caller_frame = caller[0]
  caller_globals = caller_frame.f_globals
  caller_locals = caller_frame.f_locals
  context = {}
  context.update(caller_globals)
  context.update(caller_locals)

  component_trace = _Fire(component, args, context, name)

  if component_trace.HasError():
    for help_flag in ['-h', '--help']:
      if help_flag in component_trace.elements[-1].args:
        command = '{cmd} -- --help'.format(cmd=component_trace.GetCommand())
        print(('WARNING: The proper way to show help is {cmd}.\n'
               'Showing help anyway.\n').format(cmd=pipes.quote(command)),
              file=sys.stderr)

    print('Fire trace:\n{trace}\n'.format(trace=component_trace),
          file=sys.stderr)
    result = component_trace.GetResult()
    print(
        helputils.HelpString(result, component_trace, component_trace.verbose),
        file=sys.stderr)
    raise FireExit(2, component_trace)
  elif component_trace.show_trace and component_trace.show_help:
    print('Fire trace:\n{trace}\n'.format(trace=component_trace),
          file=sys.stderr)
    result = component_trace.GetResult()
    print(
        helputils.HelpString(result, component_trace, component_trace.verbose),
        file=sys.stderr)
    raise FireExit(0, component_trace)
  elif component_trace.show_trace:
    print('Fire trace:\n{trace}'.format(trace=component_trace),
          file=sys.stderr)
    raise FireExit(0, component_trace)
  elif component_trace.show_help:
    result = component_trace.GetResult()
    print(
        helputils.HelpString(result, component_trace, component_trace.verbose),
        file=sys.stderr)
    raise FireExit(0, component_trace)
  else:
    _PrintResult(component_trace, verbose=component_trace.verbose)
    result = component_trace.GetResult()
    return result
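# Illustrative sketch (an assumption, not part of this module): because
# FireExit subclasses SystemExit, a caller can invoke Fire programmatically
# and intercept the exit, as the docstring below describes. The names `tool`
# and `run_with_help`, and the import path `fire.core`, are assumptions for
# this sketch.
import fire
from fire.core import FireExit


def tool(count=1):
  return count + 1


def run_with_help():
  try:
    # The '--help' Fire flag causes a FireExit with code 0 on success.
    fire.Fire(tool, command=['--', '--help'])
  except FireExit as exit_exception:
    print('Fire exited with code', exit_exception.code)
    print('FireTrace available on the trace property:', exit_exception.trace)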
def CompletionScript(name, component):
  """Returns the text of the Bash completion script for a Fire CLI."""
  return completion.Script(name, component)


class FireError(Exception):
  """Exception used by Fire when a Fire command cannot be executed.

  These exceptions are not raised by the Fire function, but rather are caught
  and added to the FireTrace.
  """


class FireExit(SystemExit):
  """An exception raised by Fire to the client in the case of a FireError.

  The trace of the Fire program is available on the `trace` property.

  This exception inherits from SystemExit, so clients may explicitly catch it
  with `except SystemExit` or `except FireExit`. If not caught, this exception
  will cause the client program to exit without a stacktrace.
  """

  def __init__(self, code, component_trace):
    """Constructs a FireExit exception.

    Args:
      code: (int) Exit code for the Fire CLI.
      component_trace: (FireTrace) The trace for the Fire command.
    """
    super(FireExit, self).__init__(code)
    self.trace = component_trace


def _PrintResult(component_trace, verbose=False):
  """Prints the result of the Fire call to stdout in a human readable way."""
  # TODO: Design a human readable deserializable serialization method
  # and move serialization to its own module.
  result = component_trace.GetResult()

  if isinstance(result, (list, set, types.GeneratorType)):
    for i in result:
      print(_OneLineResult(i))
  elif isinstance(result, dict):
    print(_DictAsString(result, verbose))
  elif isinstance(result, tuple):
    print(_OneLineResult(result))
  elif isinstance(result,
                  (bool, six.string_types, six.integer_types, float, complex)):
    print(result)
  elif result is not None:
    print(helputils.HelpString(result, component_trace, verbose))


def _DictAsString(result, verbose=False):
  """Returns a dict as a string.

  Args:
    result: The dict to convert to a string.
    verbose: Whether to include 'hidden' members, those keys starting with _.
  Returns:
    A string representing the dict.
  """
  result = {key: value for key, value in result.items()
            if _ComponentVisible(key, verbose)}

  if not result:
    return '{}'

  longest_key = max(len(str(key)) for key in result.keys())
  format_string = '{{key:{padding}s}} {{value}}'.format(padding=longest_key + 1)

  lines = []
  for key, value in result.items():
    line = format_string.format(key=str(key) + ':',
                                value=_OneLineResult(value))
    lines.append(line)
  return '\n'.join(lines)


def _ComponentVisible(component, verbose=False):
  """Returns whether a component should be visible in the output."""
  return (
      verbose
      or not isinstance(component, six.string_types)
      or not component.startswith('_'))


def _OneLineResult(result):
  """Returns result serialized to a single line string."""
  # TODO: Ensure line is fewer than eg 120 characters.
  if isinstance(result, six.string_types):
    return str(result).replace('\n', ' ')

  try:
    # Don't force conversion to ascii.
    return json.dumps(result, ensure_ascii=False)
  except (TypeError, ValueError):
    return str(result).replace('\n', ' ')
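# Illustrative sketch (not part of this module): the single-line
# serialization above, restated as a standalone Python 3 helper to show the
# fallback order -- flatten string newlines, then JSON, then str(). The name
# `one_line` is hypothetical.
import json


def one_line(result):
  if isinstance(result, str):
    return result.replace('\n', ' ')
  try:
    return json.dumps(result, ensure_ascii=False)  # keep non-ASCII intact
  except (TypeError, ValueError):
    return str(result).replace('\n', ' ')

# one_line({'a': 1})     -> '{"a": 1}'
# one_line('two\nlines') -> 'two lines'
# one_line(object())     -> '<object object at 0x...>' (JSON fails, str() used)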
def _Fire(component, args, context, name=None):
  """Execute a Fire command on a target component using the args supplied.

  Arguments that come after a final isolated '--' are treated as Flags, eg
  for interactive mode or completion script generation.

  Other arguments are consumed by the execution of the Fire command, eg in
  the traversal of the members of the component, or in calling a function or
  instantiating a class found during the traversal.

  The steps performed by this method are:

  1. Parse any Flag args (the args after the final --)

  2. Start with component as the current component.
  2a. If the current component is a class, instantiate it using args from
      args.
  2b. If the current component is a routine, call it using args from args.
  2c. Otherwise access a member from component using an arg from args.
  2d. Repeat 2a-2c until no args remain.

  3a. Embed into ipython REPL if interactive mode is selected.
  3b. Generate a completion script if that flag is provided.

  In step 2, arguments will only ever be consumed up to a separator; a single
  step will never consume arguments from both sides of a separator. The
  separator defaults to a hyphen (-), and can be overwritten with the
  --separator Fire argument.

  Args:
    component: The target component for Fire.
    args: A list of args to consume in Firing on the component, usually from
        the command line.
    context: A dict with the local and global variables available at the call
        to Fire.
    name: Optional. The name of the command. Used in interactive mode and in
        the tab completion script.
  Returns:
    FireTrace of components starting with component, tracing Fire's execution
        path as it consumes args.
  Raises:
    ValueError: If there are arguments that cannot be consumed.
    ValueError: If --completion is specified but no name available.
  """
  args, flag_args = parser.SeparateFlagArgs(args)

  argparser = parser.CreateParser()
  parsed_flag_args, unused_args = argparser.parse_known_args(flag_args)
  verbose = parsed_flag_args.verbose
  interactive = parsed_flag_args.interactive
  separator = parsed_flag_args.separator
  show_completion = parsed_flag_args.completion
  show_help = parsed_flag_args.help
  show_trace = parsed_flag_args.trace

  # component can be a module, class, routine, object, etc.
  if component is None:
    component = context

  initial_component = component
  component_trace = trace.FireTrace(
      initial_component=initial_component, name=name, separator=separator,
      verbose=verbose, show_help=show_help, show_trace=show_trace)

  instance = None
  remaining_args = args
  while True:
    last_component = component
    initial_args = remaining_args

    if not remaining_args and (show_help or interactive or show_trace
                               or show_completion):
      # Don't initialize the final class or call the final function unless
      # there's a separator after it, and instead process the current
      # component.
      break

    saved_args = []
    used_separator = False
    if separator in remaining_args:
      # For the current component, only use arguments up to the separator.
      separator_index = remaining_args.index(separator)
      saved_args = remaining_args[separator_index + 1:]
      remaining_args = remaining_args[:separator_index]
      used_separator = True
    assert separator not in remaining_args

    if inspect.isclass(component) or inspect.isroutine(component):
      # The component is a class or a routine; we'll try to initialize it or
      # call it.
      isclass = inspect.isclass(component)

      try:
        target = component.__name__
        filename, lineno = inspectutils.GetFileAndLine(component)

        component, consumed_args, remaining_args, capacity = _CallCallable(
            component, remaining_args)

        # Update the trace.
        if isclass:
          component_trace.AddInstantiatedClass(
              component, target, consumed_args, filename, lineno, capacity)
        else:
          component_trace.AddCalledRoutine(
              component, target, consumed_args, filename, lineno, capacity)
      except FireError as error:
        component_trace.AddError(error, initial_args)
        return component_trace

      if last_component is initial_component:
        # If the initial component is a class, keep an instance for use with
        # -i.
        instance = component

    elif isinstance(component, (list, tuple)) and remaining_args:
      # The component is a tuple or list; we'll try to access a member.
      arg = remaining_args[0]
      try:
        index = int(arg)
        component = component[index]
      except (ValueError, IndexError):
        error = FireError(
            'Unable to index into component with argument:', arg)
        component_trace.AddError(error, initial_args)
        return component_trace

      remaining_args = remaining_args[1:]
      filename = None
      lineno = None
      component_trace.AddAccessedProperty(
          component, index, [arg], filename, lineno)

    elif isinstance(component, dict) and remaining_args:
      # The component is a dict; we'll try to access a member.
      target = remaining_args[0]
      if target in component:
        component = component[target]
      elif target.replace('-', '_') in component:
        component = component[target.replace('-', '_')]
      else:
        # The target isn't present in the dict as a string, but maybe it is
        # as another type.
        # TODO: Consider alternatives for accessing non-string keys.
        found_target = False
        for key, value in component.items():
          if target == str(key):
            component = value
            found_target = True
            break
        if not found_target:
          error = FireError(
              'Cannot find target in dict:', target, component)
          component_trace.AddError(error, initial_args)
          return component_trace

      remaining_args = remaining_args[1:]
      filename = None
      lineno = None
      component_trace.AddAccessedProperty(
          component, target, [target], filename, lineno)

    elif remaining_args:
      # We'll try to access a member of the component.
      try:
        target = remaining_args[0]

        component, consumed_args, remaining_args = _GetMember(
            component, remaining_args)

        filename, lineno = inspectutils.GetFileAndLine(component)

        component_trace.AddAccessedProperty(
            component, target, consumed_args, filename, lineno)

      except FireError as error:
        component_trace.AddError(error, initial_args)
        return component_trace

    if used_separator:
      # Add back in the arguments from after the separator.
      if remaining_args:
        remaining_args = remaining_args + [separator] + saved_args
      elif (inspect.isclass(last_component)
            or inspect.isroutine(last_component)):
        remaining_args = saved_args
        component_trace.AddSeparator()
      elif component is not last_component:
        remaining_args = [separator] + saved_args
      else:
        # It was an unnecessary separator.
        remaining_args = saved_args

    if component is last_component and remaining_args == initial_args:
      # We're making no progress.
      break

  if remaining_args:
    component_trace.AddError(
        FireError('Could not consume arguments:', remaining_args),
        initial_args)
    return component_trace

  if show_completion:
    if name is None:
      raise ValueError('Cannot make completion script without command name')
    script = CompletionScript(name, initial_component)
    component_trace.AddCompletionScript(script)

  if interactive:
    variables = context.copy()

    if name is not None:
      variables[name] = initial_component
    variables['component'] = initial_component
    variables['result'] = component
    variables['trace'] = component_trace

    if instance is not None:
      variables['self'] = instance

    interact.Embed(variables, verbose)

    component_trace.AddInteractiveMode()

  return component_trace
def _GetMember(component, args):
  """Returns a subcomponent of component by consuming an arg from args.

  Given a starting component and args, this function gets a member from that
  component, consuming one arg in the process.

  Args:
    component: The component from which to get a member.
    args: Args from which to consume in the search for the next component.
  Returns:
    component: The component that was found by consuming an arg.
    consumed_args: The args that were consumed by getting this member.
    remaining_args: The remaining args that haven't been consumed yet.
  Raises:
    FireError: If we cannot consume an argument to get a member.
  """
  members = dict(inspect.getmembers(component))
  arg = args[0]
  arg_names = [
      arg,
      arg.replace('-', '_'),  # treat '-' as '_'.
  ]

  for arg_name in arg_names:
    if arg_name in members:
      return members[arg_name], [arg], args[1:]

  raise FireError('Could not consume arg:', arg)


def _CallCallable(fn, args):
  """Calls the function fn by consuming args from args.

  Args:
    fn: The function to call or class to instantiate.
    args: Args from which to consume for calling the function.
  Returns:
    component: The object that is the result of the function call.
    consumed_args: The args that were consumed for the function call.
    remaining_args: The remaining args that haven't been consumed yet.
    capacity: Whether the call could have taken additional args.
  """
  parse = _MakeParseFn(fn)
  (varargs, kwargs), consumed_args, remaining_args, capacity = parse(args)
  result = fn(*varargs, **kwargs)
  return result, consumed_args, remaining_args, capacity


def _MakeParseFn(fn):
  """Creates a parse function for fn.

  Args:
    fn: The function or class to create the parse function for.
  Returns:
    A parse function for fn. The parse function accepts a list of arguments
    and returns (varargs, kwargs), remaining_args. The original function fn
    can then be called with fn(*varargs, **kwargs). The remaining_args are
    the leftover args from the arguments to the parse function.
  """
  fn_spec = inspectutils.GetFullArgSpec(fn)
  all_args = fn_spec.args + fn_spec.kwonlyargs
  metadata = decorators.GetMetadata(fn)

  # Note: num_required_args is the number of positional arguments without
  # default values. All of these arguments are required.
  num_required_args = len(fn_spec.args) - len(fn_spec.defaults)
  required_kwonly = set(fn_spec.kwonlyargs) - set(fn_spec.kwonlydefaults)

  def _ParseFn(args):
    """Parses the list of `args` into (varargs, kwargs), remaining_args."""
    kwargs, remaining_kwargs, remaining_args = _ParseKeywordArgs(
        args, all_args, fn_spec.varkw)

    # Note: _ParseArgs modifies kwargs.
    parsed_args, kwargs, remaining_args, capacity = _ParseArgs(
        fn_spec.args, fn_spec.defaults, num_required_args, kwargs,
        remaining_args, metadata)

    if fn_spec.varargs or fn_spec.varkw:
      # If we're allowed *varargs or **kwargs, there's always capacity.
      capacity = True

    extra_kw = set(kwargs) - set(fn_spec.kwonlyargs)
    if fn_spec.varkw is None and extra_kw:
      raise FireError('Unexpected kwargs present:', extra_kw)

    missing_kwonly = set(required_kwonly) - set(kwargs)
    if missing_kwonly:
      raise FireError('Missing required flags:', missing_kwonly)

    # If we accept *varargs, then use all remaining arguments for *varargs.
    if fn_spec.varargs is not None:
      varargs, remaining_args = remaining_args, []
    else:
      varargs = []

    for index, value in enumerate(varargs):
      varargs[index] = _ParseValue(value, None, None, metadata)

    varargs = parsed_args + varargs
    remaining_args += remaining_kwargs

    consumed_args = args[:len(args) - len(remaining_args)]
    return (varargs, kwargs), consumed_args, remaining_args, capacity

  return _ParseFn
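# Illustrative hand-traced sketch of the contract _MakeParseFn describes
# above, for a hypothetical function (kept in comments since it exercises a
# private API):
#
#   def example(alpha, beta=2):
#     return alpha + beta
#
# For the args ['3', '--beta', '5', 'extra'], the parse fn would yield:
#   varargs        -> [3]                  (alpha filled positionally)
#   kwargs         -> {'beta': 5}
#   consumed_args  -> ['3', '--beta', '5']
#   remaining_args -> ['extra']            (left for the next component)
#   capacity       -> False                (no *args/**kwargs; no default used)
# so the call proceeds as example(*[3], **{'beta': 5}).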
def _ParseArgs(fn_args, fn_defaults, num_required_args, kwargs,
               remaining_args, metadata):
  """Parses the positional and named arguments from the available supplied args.

  Modifies kwargs, removing args as they are used.

  Args:
    fn_args: A list of argument names that the target function accepts,
        including positional and named arguments, but not the varargs or
        kwargs names.
    fn_defaults: A list of the default values in the function argspec.
    num_required_args: The number of required arguments from the function's
        argspec. This is the number of arguments without a default value.
    kwargs: Dict with named command line arguments and their values.
    remaining_args: The remaining command line arguments, which may still be
        used as positional arguments.
    metadata: Metadata about the function, typically from Fire decorators.
  Returns:
    parsed_args: A list of values to be used as positional arguments for
        calling the target function.
    kwargs: The input dict kwargs modified with the used kwargs removed.
    remaining_args: A list of the supplied args that have not been used yet.
    capacity: Whether the call could have taken args in place of defaults.
  Raises:
    FireError: if additional positional arguments are expected, but none are
        available.
  """
  accepts_positional_args = metadata.get(decorators.ACCEPTS_POSITIONAL_ARGS)

  capacity = False  # If we see a default get used, we'll set capacity to True

  # Select unnamed args.
  parsed_args = []
  for index, arg in enumerate(fn_args):
    value = kwargs.pop(arg, None)
    if value is not None:
      # A value is specified at the command line.
      value = _ParseValue(value, index, arg, metadata)
      parsed_args.append(value)
    else:
      # No value has been explicitly specified.
      if remaining_args and accepts_positional_args:
        # Use a positional arg.
        value = remaining_args.pop(0)
        value = _ParseValue(value, index, arg, metadata)
        parsed_args.append(value)
      elif index < num_required_args:
        raise FireError(
            'The function received no value for the required argument:', arg)
      else:
        # We're past the args for which there's no default value.
        # There's a default value for this arg.
        capacity = True
        default_index = index - num_required_args  # index into the defaults.
        parsed_args.append(fn_defaults[default_index])

  for key, value in kwargs.items():
    kwargs[key] = _ParseValue(value, None, key, metadata)

  return parsed_args, kwargs, remaining_args, capacity


def _ParseKeywordArgs(args, fn_args, fn_keywords):
  """Parses the supplied arguments for keyword arguments.

  Given a list of arguments, finds occurrences of --name value, and uses
  'name' as the keyword and 'value' as the value. Constructs and returns a
  dictionary of these keyword arguments, and returns a list of the remaining
  arguments.

  If fn_keywords is None, this finds only argument names used by the
  function, as specified through fn_args.

  Args:
    args: A list of arguments.
    fn_args: A list of argument names that the target function accepts,
        including positional and named arguments, but not the varargs or
        kwargs names.
    fn_keywords: The argument name for **kwargs, or None if **kwargs not used.
  Returns:
    kwargs: A dictionary mapping keywords to values.
    remaining_kwargs: A list of the unused kwargs from the original args.
    remaining_args: A list of the unused arguments from the original args.
  """
  kwargs = {}
  remaining_kwargs = []
  remaining_args = []

  if not args:
    return kwargs, remaining_kwargs, remaining_args

  skip_argument = False

  for index, argument in enumerate(args):
    if skip_argument:
      skip_argument = False
      continue

    arg_consumed = False
    if argument.startswith('--'):
      # This is a named argument; get its value from this arg or the next.
      got_argument = False

      keyword = argument[2:]
      contains_equals = '=' in keyword
      is_bool_syntax = (
          not contains_equals and
          (index + 1 == len(args) or args[index + 1].startswith('--')))
      if contains_equals:
        keyword, value = keyword.split('=', 1)
        got_argument = True
      elif is_bool_syntax:
        # Since there's no next arg or the next arg is a Flag, we consider
        # this flag to be a boolean.
        got_argument = True
        if keyword in fn_args:
          value = 'True'
        elif keyword.startswith('no'):
          keyword = keyword[2:]
          value = 'False'
        else:
          value = 'True'
      else:
        if index + 1 < len(args):
          value = args[index + 1]
          got_argument = True

      keyword = keyword.replace('-', '_')

      # In order for us to consume the argument as a keyword arg, we either:
      # Need to be explicitly expecting the keyword, or we need to be
      # accepting **kwargs.
      if got_argument:
        skip_argument = not contains_equals and not is_bool_syntax
        arg_consumed = True
        if keyword in fn_args or fn_keywords:
          kwargs[keyword] = value
        else:
          remaining_kwargs.append(argument)
          if skip_argument:
            remaining_kwargs.append(args[index + 1])

    if not arg_consumed:
      # The argument was not consumed, so it is still a remaining argument.
      remaining_args.append(argument)

  return kwargs, remaining_kwargs, remaining_args


def _ParseValue(value, index, arg, metadata):
  """Parses value, a string, into the appropriate type.

  The function used to parse value is determined by the remaining arguments.

  Args:
    value: The string value to be parsed, typically a command line argument.
    index: The index of the value in the function's argspec.
    arg: The name of the argument the value is being parsed for.
    metadata: Metadata about the function, typically from Fire decorators.
  Returns:
    value, parsed into the appropriate type for calling a function.
  """
  parse_fn = parser.DefaultParseValue

  # We check to see if any parse function from the fn metadata applies here.
  parse_fns = metadata.get(decorators.FIRE_PARSE_FNS)
  if parse_fns:
    default = parse_fns['default']
    positional = parse_fns['positional']
    named = parse_fns['named']

    if index is not None and 0 <= index < len(positional):
      parse_fn = positional[index]
    elif arg in named:
      parse_fn = named[arg]
    elif default is not None:
      parse_fn = default

  return parse_fn(value)
args:", "found_target = False for key, value in component.items(): if target", "not None: varargs, remaining_args = remaining_args, [] else: varargs =", "component, consumed_args, remaining_args, capacity = _CallCallable( component, remaining_args) # Update", "component_trace.AddError(error, initial_args) return component_trace if last_component is initial_component: # If", "for the required argument:', arg) else: # We're past the", "None and 0 <= index < len(positional): parse_fn = positional[index]", "to initialize it or # call it. isclass = inspect.isclass(component)", "+ 1] got_argument = True keyword = keyword.replace('-', '_') #", "up to the separator. separator_index = remaining_args.index(separator) saved_args = remaining_args[separator_index", "the separator. separator_index = remaining_args.index(separator) saved_args = remaining_args[separator_index + 1:]", "code, component_trace): \"\"\"Constructs a FireExit exception. Args: code: (int) Exit", "for the tool to stdout. --separator SEPARATOR: Use SEPARATOR in", "= initial_component variables['result'] = component variables['trace'] = component_trace if instance", "line interface. Simply call the Fire function as your main", "in component_trace.elements[-1].args: command = '{cmd} -- --help'.format(cmd=component_trace.GetCommand()) print(('WARNING: The proper", "be explicitly expecting the keyword, or we need to be", "command line arguments and their values. remaining_args: The remaining command", "import parser from fire import trace import six def Fire(component=None,", "in fn_args: value = 'True' elif keyword.startswith('no'): keyword = keyword[2:]", "got_argument = True keyword = keyword.replace('-', '_') # In order", "Fire, is the main entrypoint for Python Fire. Executes a", "of the unused kwargs from the original args. remaining_args: A", "in enumerate(fn_args): value = kwargs.pop(arg, None) if value is not", "variables[name] = initial_component variables['component'] = initial_component variables['result'] = component variables['trace']", "= named[arg] elif default is not None: parse_fn = default", "capacity = True extra_kw = set(kwargs) - set(fn_spec.kwonlyargs) if fn_spec.varkw", "sides of a separator. The separator defaults to a hyphen", "In order for us to consume the argument as a", "= None lineno = None component_trace.AddAccessedProperty( component, target, [target], filename,", "subcomponent of component by consuming an arg from args. Given", "will cause the client program to exit without a stacktrace.", "print('Fire trace:\\n{trace}\\n'.format(trace=component_trace), file=sys.stderr) result = component_trace.GetResult() print( helputils.HelpString(result, component_trace, component_trace.verbose),", "arg) component_trace.AddError(error, initial_args) return component_trace remaining_args = remaining_args[1:] filename =", "member. target = remaining_args[0] if target in component: component =", "# Add back in the arguments from after the separator.", "into the appropriate type for calling a function. \"\"\" parse_fn", "of --name value, and uses 'name' as the keyword and", "with component as the current component. 2a. If the current", "= len(fn_spec.args) - len(fn_spec.defaults) required_kwonly = set(fn_spec.kwonlyargs) - set(fn_spec.kwonlydefaults) def", "A Fire CLI command is run by consuming the arguments", "positional and named arguments from the available supplied args. Modifies", "(if it's a class). The target component begins as Component,", "method # and move serialization to it's own module. 
result", "Returns: parsed_args: A list of values to be used as", "False keyword = argument[2:] contains_equals = '=' in keyword is_bool_syntax", "from component using an arg from args. 2d. Repeat 2a-2c", "index into the defaults. parsed_args.append(fn_defaults[default_index]) for key, value in kwargs.items():", "member. arg = remaining_args[0] try: index = int(arg) component =", "local and global variables available at the call to Fire.", "inspect import json import os import pipes import shlex import", "# In order for us to consume the argument as", "# The argument was not consumed, so it is still", "fn_args or fn_keywords: kwargs[keyword] = value else: remaining_kwargs.append(argument) if skip_argument:", "component, usually from the command line. context: A dict with", "component_trace def _GetMember(component, args): \"\"\"Returns a subcomponent of component by", "consumed_args, remaining_args, capacity = parse(args) result = fn(*varargs, **kwargs) return", "value: The string value to be parsed, typically a command", "the supplied arguments for keyword arguments. Given a list of", "'=' in keyword is_bool_syntax = ( not contains_equals and (index", "We're making no progress. break if remaining_args: component_trace.AddError( FireError('Could not", "help_flag in ['-h', '--help']: if help_flag in component_trace.elements[-1].args: command =", "The object that is the result of the function call.", "remaining_args: The remaining command line arguments, which may still be", "trace:\\n{trace}'.format(trace=component_trace), file=sys.stderr) raise FireExit(0, component_trace) elif component_trace.show_help: result = component_trace.GetResult()", "used, we'll set capacity to True # Select unnamed args.", "on the `trace` property. This exception inherits from SystemExit, so", "arguments:', remaining_args), initial_args) return component_trace if show_completion: if name is", "as the keyword and 'value' as the value. Constructs and", "if remaining_args and accepts_positional_args: # Use a positional arg. value", "sequence of ' 'arguments.') # Determine the calling context. caller", "True default_index = index - num_required_args # index into the", "the function's argspec. arg: The name of the argument the", "for Python Fire. Executes a command either from the `command`", "argument or from sys.argv by recursively traversing the target object", "object `component`'s members consuming arguments, evaluating functions, and instantiating classes", "with the help or trace flags, Fire will raise a", "to the separator. separator_index = remaining_args.index(separator) saved_args = remaining_args[separator_index +", "= fn(*varargs, **kwargs) return result, consumed_args, remaining_args, capacity def _MakeParseFn(fn):", "begins as Component, and at each operation the component becomes", "print_function import inspect import json import os import pipes import", "_MakeParseFn(fn) (varargs, kwargs), consumed_args, remaining_args, capacity = parse(args) result =", "*varargs. if fn_spec.varargs is not None: varargs, remaining_args = remaining_args,", "result of the preceding operation. For example \"command fn arg1", "a list of the remaining arguments. Only if fn_keywords is", "False if argument.startswith('--'): # This is a named argument; get", "the command. Used in interactive mode and in the tab", "a list of arguments and returns (varargs, kwargs), remaining_args. The", "# TODO: Consider alternatives for accessing non-string keys. found_target =", "[ arg, arg.replace('-', '_'), # treat '-' as '_'. 
]", "The string value to be parsed, typically a command line", "= False keyword = argument[2:] contains_equals = '=' in keyword", "remaining_args skip_argument = False for index, argument in enumerate(args): if", "1 == len(args) or args[index + 1].startswith('--'))) if contains_equals: keyword,", "provided. In step 2, arguments will only ever be consumed", "if not found_target: error = FireError( 'Cannot find target in", "for. Returns: A parse function for fn. The parse function", "current component (if it's a class). When all arguments are", "find target in dict:', target, component) component_trace.AddError(error, initial_args) return component_trace", "= 'True' elif keyword.startswith('no'): keyword = keyword[2:] value = 'False'", "If we cannot consume an argument to get a member.", "the initial component is a class, keep an instance for", "supplied, this is the command executed. If not supplied, then", "names. fn_keywords: The argument name for **kwargs, or None if", "inspect.isclass(component) or inspect.isroutine(component): # The component is a class or", "the Fire program is available on the `trace` property. This", "see if any parse function from the fn metadata applies", "function left to call or class left to instantiate, the", "# For the current component, only use arguments up to", "parse_fn = positional[index] elif arg in named: parse_fn = named[arg]", "function), or instantiate the current component (if it's a class).", "When using Fire to build a CLI, your main method", "args from args. 2c. Otherwise access a member from component", "help and usage information. -h --help: Provide help and usage", "human readable way.\"\"\" # TODO: Design human readable deserializable serialization", "command line args by default if no command is specified.", "of the argument the value is being parsed for. metadata:", "print(result) elif result is not None: print(helputils.HelpString(result, component_trace, verbose)) def", "and named arguments from the available supplied args. Modifies kwargs,", "remaining_args: A list of the unused arguments from the original", "args = command elif command is None: # Use the", "\"\"\"Creates a parse function for fn. Args: fn: The function", "through fn_args. This returns the values of the args as", "main method to create a CLI. When using Fire to", "client program to exit without a stacktrace. \"\"\" def __init__(self,", "the current component (if it's a function), or instantiate the", "A list of arguments fn_args: A list of argument names", "either: # Need to be explicitly expecting the keyword, or", "with arguments 'arg1' and 'arg2'. Additional examples are available in", "final function unless # there's a separator after it, and", "and named arguments, but not the varargs or kwargs names.", "kwargs removed. remaining_args: A list of the supplied args that", "determined by the remaining arguments. Args: value: The string value", "import division from __future__ import print_function import inspect import json", "= _ParseValue(value, None, key, metadata) return parsed_args, kwargs, remaining_args, capacity", "function argspec. 
num_required_args: The number of required arguments from the", "value = _ParseValue(value, index, arg, metadata) parsed_args.append(value) elif index <", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "# If we accept *varargs, then use all remaining arguments", "float, complex)): print(result) elif result is not None: print(helputils.HelpString(result, component_trace,", "arg = args[0] arg_names = [ arg, arg.replace('-', '_'), #", "consuming arguments, evaluating functions, and instantiating classes as it goes.", "for arg_name in arg_names: if arg_name in members: return members[arg_name],", "A list of the unused kwargs from the original args.", "the command is taken from sys.argv instead. This can be", "value. # There's a default value for this arg. capacity", "of these keyword arguments, and returns a list of the", "The trace for the Fire command. \"\"\" super(FireExit, self).__init__(code) self.trace", "got_argument: skip_argument = not contains_equals and not is_bool_syntax arg_consumed =", "trace for the Fire command. \"\"\" super(FireExit, self).__init__(code) self.trace =", "but not the varargs or kwargs names. fn_keywords: The argument", "a separator after it, and instead process the current component.", "Get the Fire Trace for the command. \"\"\" from __future__", "Args: fn_args: A list of argument names that the target", "classes as it goes. When building a CLI with Fire,", "convert to a string verbose: Whether to include 'hidden' members,", "component_trace.show_trace and component_trace.show_help: print('Fire trace:\\n{trace}\\n'.format(trace=component_trace), file=sys.stderr) result = component_trace.GetResult() print(", "call. consumed_args: The args that were consumed for the function", "the value. Constructs and returns a dictionary of these keyword", "dict \"\"\" result = {key: value for key, value in", "argspec. num_required_args: The number of required arguments from the function's", "Flag, we consider # this flag to be a boolean.", "to include 'hidden' members, those keys starting with _. Returns:", "fn(*varargs, **kwargs) return result, consumed_args, remaining_args, capacity def _MakeParseFn(fn): \"\"\"Creates", "positional arguments. metadata: Metadata about the function, typically from Fire", "# Update the trace. if isclass: component_trace.AddInstantiatedClass( component, target, consumed_args,", "remaining_args def _ParseValue(value, index, arg, metadata): \"\"\"Parses value, a string,", "verbose: Whether to include 'hidden' members, those keys starting with", "of `args` into (varargs, kwargs), remaining_args.\"\"\" kwargs, remaining_kwargs, remaining_args =", "component_trace.AddInteractiveMode() return component_trace def _GetMember(component, args): \"\"\"Returns a subcomponent of", "Simply call the Fire function as your main method to", "consumed by getting this member. remaining_args: The remaining args that", "accepts_positional_args: # Use a positional arg. value = remaining_args.pop(0) value", "Drop into a Python REPL after running the command. --completion:", "Raises: ValueError: If there are arguments that cannot be consumed.", "not isinstance(component, six.string_types) or not component.startswith('_')) def _OneLineResult(result): \"\"\"Returns result", "variables['result'] = component variables['trace'] = component_trace if instance is not", "remaining_args += remaining_kwargs consumed_args = args[:len(args) - len(remaining_args)] return (varargs,", "your main method includes a call to Fire. 
Eg: def", "the output.\"\"\" return ( verbose or not isinstance(component, six.string_types) or", "command. Used in interactive mode and in the tab completion", "value in component.items(): if target == str(key): component = value", "result: return '{}' longest_key = max(len(str(key)) for key in result.keys())", "remaining_args = remaining_args[:separator_index] used_separator = True assert separator not in", "if arg_name in members: return members[arg_name], [arg], args[1:] raise FireError('Could", "the target function. kwargs: The input dict kwargs modified with", "next. got_argument = False keyword = argument[2:] contains_equals = '='", "for generating the completion script. Returns: The result of executing", "set capacity to True # Select unnamed args. parsed_args =", "or call the final function unless # there's a separator", "a tuple or list; we'll try to access a member.", "in place of the default separator, '-'. --trace: Get the", "= metadata.get(decorators.ACCEPTS_POSITIONAL_ARGS) capacity = False # If we see a", "named arguments, but not the varargs or kwargs names. fn_keywords:", "Version 2.0 (the \"License\"); # you may not use this", "component_trace.HasError(): for help_flag in ['-h', '--help']: if help_flag in component_trace.elements[-1].args:", "if not remaining_args and (show_help or interactive or show_trace or", "argument[2:] contains_equals = '=' in keyword is_bool_syntax = ( not", "using args from args. 2b. If the current component is", "not consumed, so it is still a remaining argument. remaining_args.append(argument)", "of positional arguments without # default values. All of these", "import decorators from fire import helputils from fire import inspectutils", "2c. Otherwise access a member from component using an arg", "consumed_args, filename, lineno, capacity) except FireError as error: component_trace.AddError(error, initial_args)", "code 2. When used with the help or trace flags,", "missing_kwonly: raise FireError('Missing required flags:', missing_kwonly) # If we accept", "__future__ import absolute_import from __future__ import division from __future__ import", "then the command is taken from sys.argv instead. This can", "= parsed_flag_args.verbose interactive = parsed_flag_args.interactive separator = parsed_flag_args.separator show_completion =", "to convert to a string verbose: Whether to include 'hidden'", "def _ComponentVisible(component, verbose=False): \"\"\"Returns whether a component should be visible", "them to the appropriate type. Args: args: A list of", "When used with the help or trace flags, Fire will", "default value. # There's a default value for this arg.", "you might run: `command -- --help`. The available flags for", "list of strings; a list of strings is preferred. name:", "to all Fire CLIs, must go after a separating \"--\".", "argument the value is being parsed for. metadata: Metadata about", "component elif isinstance(component, (list, tuple)) and remaining_args: # The component", "If we see a default get used, we'll set capacity", "function's argspec. arg: The name of the argument the value", "from fire import parser from fire import trace import six", "file=sys.stderr) raise FireExit(0, component_trace) elif component_trace.show_trace: print('Fire trace:\\n{trace}'.format(trace=component_trace), file=sys.stderr) raise", "run: `command -- --help`. 
The available flags for all Fire", "by applicable law or agreed to in writing, software #", "keyword, value = keyword.split('=', 1) got_argument = True elif is_bool_syntax:", "\"\"\"Python Fire is a library for creating CLIs from absolutely", "after the final --) 2. Start with component as the", "raise ValueError('The command argument must be a string or a", "kwargs.pop(arg, None) if value is not None: # A value", "trace of the Fire program is available on the `trace`", "kwargs), consumed_args, remaining_args, capacity return _ParseFn def _ParseArgs(fn_args, fn_defaults, num_required_args,", "kwargs, remaining_args, capacity def _ParseKeywordArgs(args, fn_args, fn_keywords): \"\"\"Parses the supplied", "args as strings. They are later processed by _ParseArgs, which", "eg 120 characters. if isinstance(result, six.string_types): return str(result).replace('\\n', ' ')", "private members in help and usage information. -h --help: Provide", "raise FireExit(0, component_trace) else: _PrintResult(component_trace, verbose=component_trace.verbose) result = component_trace.GetResult() return", "it with `except SystemExit` or `except FireExit`. If not caught,", "executed. These exceptions are not raised by the Fire function,", "The component is a tuple or list; we'll try to", "capacity. capacity = True extra_kw = set(kwargs) - set(fn_spec.kwonlyargs) if", "component: The initial target component. command: Optional. If supplied, this", "argparser = parser.CreateParser() parsed_flag_args, unused_args = argparser.parse_known_args(flag_args) verbose = parsed_flag_args.verbose", "# TODO: Design human readable deserializable serialization method # and", "args that were consumed by getting this member. remaining_args: The", "been consumed yet. Raises: FireError: If we cannot consume an", "SEPARATOR in place of the default separator, '-'. --trace: Get", "caller_frame.f_globals caller_locals = caller_frame.f_locals context = {} context.update(caller_globals) context.update(caller_locals) component_trace", "if skip_argument: remaining_kwargs.append(args[index + 1]) if not arg_consumed: # The", "a function. \"\"\" parse_fn = parser.DefaultParseValue # We check to", "taken args in place of defaults. Raises: FireError: if additional", "= args[:len(args) - len(remaining_args)] return (varargs, kwargs), consumed_args, remaining_args, capacity", "lines.append(line) return '\\n'.join(lines) def _ComponentVisible(component, verbose=False): \"\"\"Returns whether a component", "function gets a member from that component, consuming one arg", "The separator defaults to a hyphen (-), and can be", "lineno) elif remaining_args: # We'll try to access a member", "they are used. Args: fn_args: A list of argument names", "command. --completion: Write the Bash completion script for the tool", "arg from args. 2d. Repeat 2a-2c until no args remain.", "create a CLI. When using Fire to build a CLI,", "number of positional arguments without # default values. All of", "Fire when a Fire command cannot be executed. These exceptions", "Args: value: The string value to be parsed, typically a", "from that component, consuming one arg in the process. 
Args:", "component_trace.AddError( FireError('Could not consume arguments:', remaining_args), initial_args) return component_trace if", "None) if value is not None: # A value is", "value = 'True' else: if index + 1 < len(args):", "format_string = '{{key:{padding}s}} {{value}}'.format(padding=longest_key + 1) lines = [] for", "= argparser.parse_known_args(flag_args) verbose = parsed_flag_args.verbose interactive = parsed_flag_args.interactive separator =", "recursively traversing the target object `component`'s members consuming arguments, evaluating", "verbose=False): \"\"\"Returns a dict as a string. Args: result: The", "consuming one arg in the process. Args: component: The component", "set(kwargs) if missing_kwonly: raise FireError('Missing required flags:', missing_kwonly) # If", "uses 'name' as the keyword and 'value' as the value.", "list of strings is preferred. name: Optional. The name of", "component is not last_component: remaining_args = [separator] + saved_args else:", "Fire CLIs are: -v --verbose: Include private members in help", "to exit without a stacktrace. \"\"\" def __init__(self, code, component_trace):", "are arguments that cannot be consumed. ValueError: If --completion is", "and usage information. -h --help: Provide help and usage information", "caller_locals = caller_frame.f_locals context = {} context.update(caller_globals) context.update(caller_locals) component_trace =", "never consume arguments from both sides of a separator. The", "arguments from the available supplied args. Modifies kwargs, removing args", "list of the unused kwargs from the original args. remaining_args:", "consumed, so it is still a remaining argument. remaining_args.append(argument) return", "initial_component = component component_trace = trace.FireTrace( initial_component=initial_component, name=name, separator=separator, verbose=verbose,", "arg, arg.replace('-', '_'), # treat '-' as '_'. ] for", "building a CLI with Fire, your main method should call", "2017 Google Inc. # # Licensed under the Apache License,", "file=sys.stderr) raise FireExit(0, component_trace) else: _PrintResult(component_trace, verbose=component_trace.verbose) result = component_trace.GetResult()", "[arg], filename, lineno) elif isinstance(component, dict) and remaining_args: # The", "remaining args that haven't been consumed yet. capacity: Whether the", "keyword arguments, and returns a list of the remaining arguments.", "the varargs or kwargs names. fn_keywords: The argument name for", "args. parsed_args = [] for index, arg in enumerate(fn_args): value", "import print_function import inspect import json import os import pipes", "return parsed_args, kwargs, remaining_args, capacity def _ParseKeywordArgs(args, fn_args, fn_keywords): \"\"\"Parses", "enumerate(args): if skip_argument: skip_argument = False continue arg_consumed = False", "applicable law or agreed to in writing, software # distributed", "= False if argument.startswith('--'): # This is a named argument;", "print(_OneLineResult(i)) elif inspect.isgeneratorfunction(result): raise NotImplementedError elif isinstance(result, dict): print(_DictAsString(result, verbose))", "parsed_args = [] for index, arg in enumerate(fn_args): value =", "which to consume in the search for the next component.", "or not component.startswith('_')) def _OneLineResult(result): \"\"\"Returns result serialized to a", "metadata) parsed_args.append(value) else: # No value has been explicitly specified.", "and for generating the completion script. 
Returns: The result of", "any parse function from the fn metadata applies here. parse_fns", "--help'.format(cmd=component_trace.GetCommand()) print(('WARNING: The proper way to show help is {cmd}.\\n'", "extra_kw) missing_kwonly = set(required_kwonly) - set(kwargs) if missing_kwonly: raise FireError('Missing", "= parse_fns['positional'] named = parse_fns['named'] if index is not None", "it. isclass = inspect.isclass(component) try: target = component.__name__ filename, lineno", "by consuming args from args. Args: fn: The function to", "or class to instantiate. args: Args from which to consume", "used. Args: fn_args: A list of argument names that the", "available at the call to Fire. name: Optional. The name", "those keys starting with _. Returns: A string representing the", "the traversal. The steps performed by this method are: 1.", "decorators.GetMetadata(fn) # Note: num_required_args is the number of positional arguments", "command line argument. index: The index of the value in", "initial_component: # If the initial component is a class, keep", "# component can be a module, class, routine, object, etc.", "of component by consuming an arg from args. Given a", "interactive: variables = context.copy() if name is not None: variables[name]", "isinstance(command, (list, tuple)): args = command elif command is None:", "_ParseValue(value, None, key, metadata) return parsed_args, kwargs, remaining_args, capacity def", "num_required_args # index into the defaults. parsed_args.append(fn_defaults[default_index]) for key, value", "tuple or list; we'll try to access a member. arg", "ValueError: If the command argument is supplied, but not a", "calling a function or instantiating a class found during the", "name available. \"\"\" args, flag_args = parser.SeparateFlagArgs(args) argparser = parser.CreateParser()", "try to access a member. target = remaining_args[0] if target", "that were consumed by getting this member. remaining_args: The remaining", "available in the examples directory. Fire Flags, common to all", "use all remaining arguments for *varargs. if fn_spec.varargs is not", "The name of the command as entered at the command", "= component initial_args = remaining_args if not remaining_args and (show_help", "CLI. When using Fire to build a CLI, your main", "for the next component. Returns: component: The component that was", "import trace import six def Fire(component=None, command=None, name=None): \"\"\"This function,", "ascii. return json.dumps(result, ensure_ascii=False) except (TypeError, ValueError): return str(result).replace('\\n', '", "A list of args to consume in Firing on the", "the Fire function, but rather are caught and added to", "# You may obtain a copy of the License at", "a parse function for fn. Args: fn: The function or", "if fn_spec.varargs is not None: varargs, remaining_args = remaining_args, []", "varargs remaining_args += remaining_kwargs consumed_args = args[:len(args) - len(remaining_args)] return", "arg in named: parse_fn = named[arg] elif default is not", "keyword is_bool_syntax = ( not contains_equals and (index + 1", "to a separator; a single step will never consume arguments", "result of the function call. consumed_args: The args that were", "command elif command is None: # Use the command line", "args. 2c. Otherwise access a member from component using an", "into ipython REPL if interactive mode is selected. 3b. 
Generate", "or show_completion): # Don't initialize the final class or call", "class left to instantiate, the resulting current component is the", "'False' else: value = 'True' else: if index + 1", "component_trace): \"\"\"Constructs a FireExit exception. Args: code: (int) Exit code", "\"\"\" kwargs = {} remaining_kwargs = [] remaining_args = []", "_ParseValue(value, None, None, metadata) varargs = parsed_args + varargs remaining_args", "operation the component becomes the result of the preceding operation.", "traversal. The steps performed by this method are: 1. Parse", "except FireError as error: component_trace.AddError(error, initial_args) return component_trace if last_component", "not been used yet. capacity: Whether the call could have", "--separator Fire argument. Args: component: The target component for Fire.", "arg, metadata) parsed_args.append(value) elif index < num_required_args: raise FireError( 'The", "Copyright (C) 2017 Google Inc. # # Licensed under the", "your main method should call this function. Args: component: The", "result = component_trace.GetResult() return result def CompletionScript(name, component): \"\"\"Returns the", "default values in the function argspec. num_required_args: The number of", "+ saved_args elif (inspect.isclass(last_component) or inspect.isroutine(last_component)): remaining_args = saved_args component_trace.AddSeparator()", "of strings; a list of strings is preferred. name: Optional.", "no name available. \"\"\" args, flag_args = parser.SeparateFlagArgs(args) argparser =", "kwargs from the original args. remaining_args: A list of the", "fn arg1 arg2\" might access the \"fn\" property of the", "You can call Fire on any Python object: functions, classes,", "metadata = decorators.GetMetadata(fn) # Note: num_required_args is the number of", "and 'arg2'. Additional examples are available in the examples directory.", "examples directory. Fire Flags, common to all Fire CLIs, must", "kwargs, removing args as they are used. Args: fn_args: A", "(index + 1 == len(args) or args[index + 1].startswith('--'))) if", "If supplied, this is the command executed. If not supplied,", "class or a routine; we'll try to initialize it or", "args to consume in Firing on the component, usually from", "returns the values of the args as strings. They are", "the command line. value = _ParseValue(value, index, arg, metadata) parsed_args.append(value)", "component, tracing Fire's execution path as it consumes args. Raises:", "arguments and returns (varargs, kwargs), remaining_args. The original function fn", "an argument to get a member. \"\"\" members = dict(inspect.getmembers(component))", "argument:', arg) else: # We're past the args for which", "progress. break if remaining_args: component_trace.AddError( FireError('Could not consume arguments:', remaining_args),", "encounters a FireError, Fire will raise a FireExit with code", "call Fire on any Python object: functions, classes, modules, objects,", "kwargs: Dict with named command line arguments and their values.", "to either access a member of the current component, call", "remaining_args = _ParseKeywordArgs( args, all_args, fn_spec.varkw) # Note: _ParseArgs modifies", "\"\"\" def __init__(self, code, component_trace): \"\"\"Constructs a FireExit exception. Args:", "the required argument:', arg) else: # We're past the args", "component_trace def _PrintResult(component_trace, verbose=False): \"\"\"Prints the result of the Fire", "specified but no name available. 
\"\"\" args, flag_args = parser.SeparateFlagArgs(args)", "property. This exception inherits from SystemExit, so clients may explicitly", "This can be a string or a list of strings;", "[arg], args[1:] raise FireError('Could not consume arg:', arg) def _CallCallable(fn,", "'value' as the value. Constructs and returns a dictionary of", "the function, typically from Fire decorators. Returns: value, parsed into", "no command is specified. args = sys.argv[1:] else: raise ValueError('The", "try to access a member of the component. try: target", "trace. if isclass: component_trace.AddInstantiatedClass( component, target, consumed_args, filename, lineno, capacity)", "in component.items(): if target == str(key): component = value found_target", "remaining_args, capacity = _ParseArgs( fn_spec.args, fn_spec.defaults, num_required_args, kwargs, remaining_args, metadata)", "member of current component, call the current component (if it's", "name) if component_trace.HasError(): for help_flag in ['-h', '--help']: if help_flag", "for interactive mode or completion script generation. Other arguments are", "SystemExit, so clients may explicitly catch it with `except SystemExit`", "ipython REPL if interactive mode is selected. 3b. Generate a", "command line. context: A dict with the local and global", "for accessing non-string keys. found_target = False for key, value", "component_trace.AddSeparator() elif component is not last_component: remaining_args = [separator] +", "-v --verbose: Include private members in help and usage information.", "FireError( 'Cannot find target in dict:', target, component) component_trace.AddError(error, initial_args)", "arguments for keyword arguments. Given a list of arguments, finds", "is not None and 0 <= index < len(positional): parse_fn", "In step 2, arguments will only ever be consumed up", "to be used as positional arguments for calling the target", "search for the next component. Returns: component: The component that", "(list, tuple)): args = command elif command is None: #", "\"License\"); # you may not use this file except in", "the keyword and 'value' as the value. Constructs and returns", "remaining_args, capacity def _ParseKeywordArgs(args, fn_args, fn_keywords): \"\"\"Parses the supplied arguments", "member of the component. try: target = remaining_args[0] component, consumed_args,", "try: target = remaining_args[0] component, consumed_args, remaining_args = _GetMember( component,", "from __future__ import print_function import inspect import json import os", "parsed_flag_args.help show_trace = parsed_flag_args.trace # component can be a module,", "They all work! Python Fire turns any Python object into", "args: Args from which to consume for calling the function.", "command either from the `command` argument or from sys.argv by", "True break if not found_target: error = FireError( 'Cannot find", "component and args, this function gets a member from that", "Whether the call could have taken additional args. \"\"\" parse", "return str(result).replace('\\n', ' ') try: # Don't force conversion to", "but not a string or a sequence of arguments. FireExit:", "in kwargs.items(): kwargs[key] = _ParseValue(value, None, key, metadata) return parsed_args,", "found during the traversal. The steps performed by this method", "it consumes args. Raises: ValueError: If there are arguments that", "the result of the function call. consumed_args: The args that", "string, into the appropriate type. The function used to parse", "function with arguments 'arg1' and 'arg2'. 
Additional examples are available", "parsed into the appropriate type for calling a function. \"\"\"", "instead process the current component. break saved_args = [] used_separator", "specified. if remaining_args and accepts_positional_args: # Use a positional arg.", "contains_equals = '=' in keyword is_bool_syntax = ( not contains_equals", "initialize the final class or call the final function unless", "= inspectutils.GetFullArgSpec(fn) all_args = fn_spec.args + fn_spec.kwonlyargs metadata = decorators.GetMetadata(fn)", "command=None, name=None): \"\"\"This function, Fire, is the main entrypoint for", "value = kwargs.pop(arg, None) if value is not None: #", "component, target, consumed_args, filename, lineno, capacity) else: component_trace.AddCalledRoutine( component, target,", "FireExit(0, component_trace) else: _PrintResult(component_trace, verbose=component_trace.verbose) result = component_trace.GetResult() return result", "if got_argument: skip_argument = not contains_equals and not is_bool_syntax arg_consumed", "3a. Embed into ipython REPL if interactive mode is selected.", "Arguments that come after a final isolated '--' are treated", "value is specified at the command line. value = _ParseValue(value,", "the fn metadata applies here. parse_fns = metadata.get(decorators.FIRE_PARSE_FNS) if parse_fns:", "be parsed, typically a command line argument. index: The index", "function unless # there's a separator after it, and instead", "if last_component is initial_component: # If the initial component is", "= caller[0] caller_globals = caller_frame.f_globals caller_locals = caller_frame.f_locals context =", "trace.FireTrace( initial_component=initial_component, name=name, separator=separator, verbose=verbose, show_help=show_help, show_trace=show_trace) instance = None", "set(fn_spec.kwonlydefaults) def _ParseFn(args): \"\"\"Parses the list of `args` into (varargs,", "remaining_args = remaining_args, [] else: varargs = [] for index,", "keyword[2:] value = 'False' else: value = 'True' else: if", "is a dict; we'll try to access a member. target", "in the function's argspec. arg: The name of the argument", "= component[target] elif target.replace('-', '_') in component: component = component[target.replace('-',", "cannot be executed. These exceptions are not raised by the", "Exit code for the Fire CLI. component_trace: (FireTrace) The trace", "enumerate(fn_args): value = kwargs.pop(arg, None) if value is not None:", "command argument must be a string or a sequence of" ]
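
# A minimal usage sketch, not part of fire/core.py itself: a client of the
# Fire() entrypoint defined above, following the pattern from the module
# docstring. The Calculator class and the file name calculator.py are
# illustrative assumptions.
#
#   import fire
#
#   class Calculator(object):
#     """A simple calculator class."""
#
#     def double(self, number):
#       return 2 * number
#
#   if __name__ == '__main__':
#     fire.Fire(Calculator)
#
# Running `python calculator.py double 10` consumes `double` via _GetMember,
# then _CallCallable/_ParseValue turn the string '10' into the int 10 before
# the call, printing 20. Running `python calculator.py -- --help` prints help
# instead, since flags after the final '--' are split off by
# parser.SeparateFlagArgs at the top of _Fire.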
[ "pandas as pd from sklearn.svm import SVC from sklearn.model_selection import", "= train_test_split(x, y, test_size = 0.2, random_state=0) #define a pipeline", "output = pipe.predict([inputs]) print(output) else: output = [\"None\"] print(\"\\nUser didn't", "pd.read_csv(dataset_path) print() print(data.describe()) x=data.iloc[:,:-1] y=data.iloc[:,-1] column_trans = make_column_transformer((OneHotEncoder(),[-1]),remainder='passthrough') # apply", "inputs = ast.literal_eval(os.environ[\"INPUT_MYINPUT\"]) print(\"\\nThe Predicted Ouput is :\") output =", "from sklearn.preprocessing import OneHotEncoder from sklearn.compose import make_column_transformer from sklearn.pipeline", "Get the dataset from the users GitHub repository dataset_path =", "random_state=0) #define a pipeline pipe = make_pipeline(column_trans,SVC()) pipe.fit(x_train,y_train) #training the", "print() print(data.describe()) x=data.iloc[:,:-1] y=data.iloc[:,-1] column_trans = make_column_transformer((OneHotEncoder(),[-1]),remainder='passthrough') # apply encoding", "SVC from sklearn.model_selection import train_test_split from sklearn.preprocessing import OneHotEncoder from", "repository dataset_path = \"https://raw.githubusercontent.com/\" + os.environ[\"GITHUB_REPOSITORY\"] + \"/master/dataset.csv\" data =", "from the users GitHub repository dataset_path = \"https://raw.githubusercontent.com/\" + os.environ[\"GITHUB_REPOSITORY\"]", "else: output = [\"None\"] print(\"\\nUser didn't provided inputs to predict\")", "#define a pipeline pipe = make_pipeline(column_trans,SVC()) pipe.fit(x_train,y_train) #training the model", "print(\"\\nThe Predicted Ouput is :\") output = pipe.predict([inputs]) print(output) else:", "pipe: pickle.dump(pipe,open('model.pkl','wb')) # store the artifact in docker container if", "= \"https://raw.githubusercontent.com/\" + os.environ[\"GITHUB_REPOSITORY\"] + \"/master/dataset.csv\" data = pd.read_csv(dataset_path) print()", "y=data.iloc[:,-1] column_trans = make_column_transformer((OneHotEncoder(),[-1]),remainder='passthrough') # apply encoding on output variable", "from sklearn.model_selection import train_test_split from sklearn.preprocessing import OneHotEncoder from sklearn.compose", "train_test_split(x, y, test_size = 0.2, random_state=0) #define a pipeline pipe", "Finished\") accuracy = pipe.score(x_test,y_test) print(\"\\nAccuracy of the Model: \"+str(accuracy*100)) if", "the dataset from the users GitHub repository dataset_path = \"https://raw.githubusercontent.com/\"", "encoding on output variable x_train, x_test, y_train, y_test = train_test_split(x,", "pd from sklearn.svm import SVC from sklearn.model_selection import train_test_split from", "sklearn.preprocessing import OneHotEncoder from sklearn.compose import make_column_transformer from sklearn.pipeline import", "pipe = make_pipeline(column_trans,SVC()) pipe.fit(x_train,y_train) #training the model print(\"\\nModel Training Finished\")", "print(\"\\nUser didn't provided inputs to predict\") print(\"\\n=======================Action Completed========================\") print(f\"::set-output name=myOutput::{output[0]}\")", "= 0.2, random_state=0) #define a pipeline pipe = make_pipeline(column_trans,SVC()) pipe.fit(x_train,y_train)", "main(): # Get the dataset from the users GitHub repository", "x_test, y_train, y_test = train_test_split(x, y, test_size = 0.2, random_state=0)", "inputs to predict\") print(\"\\n=======================Action Completed========================\") print(f\"::set-output name=myOutput::{output[0]}\") if __name__ 
==", "\"https://raw.githubusercontent.com/\" + os.environ[\"GITHUB_REPOSITORY\"] + \"/master/dataset.csv\" data = pd.read_csv(dataset_path) print() print(data.describe())", "of the Model: \"+str(accuracy*100)) if pipe: pickle.dump(pipe,open('model.pkl','wb')) # store the", "pipeline pipe = make_pipeline(column_trans,SVC()) pipe.fit(x_train,y_train) #training the model print(\"\\nModel Training", "sklearn.svm import SVC from sklearn.model_selection import train_test_split from sklearn.preprocessing import", "def main(): # Get the dataset from the users GitHub", "y, test_size = 0.2, random_state=0) #define a pipeline pipe =", "Model: \"+str(accuracy*100)) if pipe: pickle.dump(pipe,open('model.pkl','wb')) # store the artifact in", "the model print(\"\\nModel Training Finished\") accuracy = pipe.score(x_test,y_test) print(\"\\nAccuracy of", "= pd.read_csv(dataset_path) print() print(data.describe()) x=data.iloc[:,:-1] y=data.iloc[:,-1] column_trans = make_column_transformer((OneHotEncoder(),[-1]),remainder='passthrough') #", "sklearn.compose import make_column_transformer from sklearn.pipeline import make_pipeline import pickle def", "sklearn.pipeline import make_pipeline import pickle def main(): # Get the", "Ouput is :\") output = pipe.predict([inputs]) print(output) else: output =", "\"/master/dataset.csv\" data = pd.read_csv(dataset_path) print() print(data.describe()) x=data.iloc[:,:-1] y=data.iloc[:,-1] column_trans =", "variable x_train, x_test, y_train, y_test = train_test_split(x, y, test_size =", ":\") output = pipe.predict([inputs]) print(output) else: output = [\"None\"] print(\"\\nUser", "sklearn.model_selection import train_test_split from sklearn.preprocessing import OneHotEncoder from sklearn.compose import", "container if not os.environ[\"INPUT_MYINPUT\"] == 'zeroinputs': inputs = ast.literal_eval(os.environ[\"INPUT_MYINPUT\"]) print(\"\\nThe", "import make_column_transformer from sklearn.pipeline import make_pipeline import pickle def main():", "a pipeline pipe = make_pipeline(column_trans,SVC()) pipe.fit(x_train,y_train) #training the model print(\"\\nModel", "ast.literal_eval(os.environ[\"INPUT_MYINPUT\"]) print(\"\\nThe Predicted Ouput is :\") output = pipe.predict([inputs]) print(output)", "= [\"None\"] print(\"\\nUser didn't provided inputs to predict\") print(\"\\n=======================Action Completed========================\")", "from sklearn.svm import SVC from sklearn.model_selection import train_test_split from sklearn.preprocessing", "dataset from the users GitHub repository dataset_path = \"https://raw.githubusercontent.com/\" +", "os, ast import pandas as pd from sklearn.svm import SVC", "+ os.environ[\"GITHUB_REPOSITORY\"] + \"/master/dataset.csv\" data = pd.read_csv(dataset_path) print() print(data.describe()) x=data.iloc[:,:-1]", "if not os.environ[\"INPUT_MYINPUT\"] == 'zeroinputs': inputs = ast.literal_eval(os.environ[\"INPUT_MYINPUT\"]) print(\"\\nThe Predicted", "artifact in docker container if not os.environ[\"INPUT_MYINPUT\"] == 'zeroinputs': inputs", "docker container if not os.environ[\"INPUT_MYINPUT\"] == 'zeroinputs': inputs = ast.literal_eval(os.environ[\"INPUT_MYINPUT\"])", "import make_pipeline import pickle def main(): # Get the dataset", "# apply encoding on output variable x_train, x_test, y_train, y_test", "import train_test_split from sklearn.preprocessing import OneHotEncoder from sklearn.compose import make_column_transformer", "output = [\"None\"] print(\"\\nUser didn't provided inputs to predict\") 
print(\"\\n=======================Action", "0.2, random_state=0) #define a pipeline pipe = make_pipeline(column_trans,SVC()) pipe.fit(x_train,y_train) #training", "import OneHotEncoder from sklearn.compose import make_column_transformer from sklearn.pipeline import make_pipeline", "pipe.score(x_test,y_test) print(\"\\nAccuracy of the Model: \"+str(accuracy*100)) if pipe: pickle.dump(pipe,open('model.pkl','wb')) #", "from sklearn.compose import make_column_transformer from sklearn.pipeline import make_pipeline import pickle", "Predicted Ouput is :\") output = pipe.predict([inputs]) print(output) else: output", "= make_column_transformer((OneHotEncoder(),[-1]),remainder='passthrough') # apply encoding on output variable x_train, x_test,", "#training the model print(\"\\nModel Training Finished\") accuracy = pipe.score(x_test,y_test) print(\"\\nAccuracy", "as pd from sklearn.svm import SVC from sklearn.model_selection import train_test_split", "# store the artifact in docker container if not os.environ[\"INPUT_MYINPUT\"]", "provided inputs to predict\") print(\"\\n=======================Action Completed========================\") print(f\"::set-output name=myOutput::{output[0]}\") if __name__", "os.environ[\"INPUT_MYINPUT\"] == 'zeroinputs': inputs = ast.literal_eval(os.environ[\"INPUT_MYINPUT\"]) print(\"\\nThe Predicted Ouput is", "pipe.predict([inputs]) print(output) else: output = [\"None\"] print(\"\\nUser didn't provided inputs", "if pipe: pickle.dump(pipe,open('model.pkl','wb')) # store the artifact in docker container", "data = pd.read_csv(dataset_path) print() print(data.describe()) x=data.iloc[:,:-1] y=data.iloc[:,-1] column_trans = make_column_transformer((OneHotEncoder(),[-1]),remainder='passthrough')", "import pickle def main(): # Get the dataset from the", "to predict\") print(\"\\n=======================Action Completed========================\") print(f\"::set-output name=myOutput::{output[0]}\") if __name__ == \"__main__\":", "OneHotEncoder from sklearn.compose import make_column_transformer from sklearn.pipeline import make_pipeline import", "model print(\"\\nModel Training Finished\") accuracy = pipe.score(x_test,y_test) print(\"\\nAccuracy of the", "the users GitHub repository dataset_path = \"https://raw.githubusercontent.com/\" + os.environ[\"GITHUB_REPOSITORY\"] +", "[\"None\"] print(\"\\nUser didn't provided inputs to predict\") print(\"\\n=======================Action Completed========================\") print(f\"::set-output", "accuracy = pipe.score(x_test,y_test) print(\"\\nAccuracy of the Model: \"+str(accuracy*100)) if pipe:", "y_train, y_test = train_test_split(x, y, test_size = 0.2, random_state=0) #define", "= make_pipeline(column_trans,SVC()) pipe.fit(x_train,y_train) #training the model print(\"\\nModel Training Finished\") accuracy", "import SVC from sklearn.model_selection import train_test_split from sklearn.preprocessing import OneHotEncoder", "print(data.describe()) x=data.iloc[:,:-1] y=data.iloc[:,-1] column_trans = make_column_transformer((OneHotEncoder(),[-1]),remainder='passthrough') # apply encoding on", "the artifact in docker container if not os.environ[\"INPUT_MYINPUT\"] == 'zeroinputs':", "= pipe.score(x_test,y_test) print(\"\\nAccuracy of the Model: \"+str(accuracy*100)) if pipe: pickle.dump(pipe,open('model.pkl','wb'))", "\"+str(accuracy*100)) if pipe: pickle.dump(pipe,open('model.pkl','wb')) # store the artifact in docker", "= pipe.predict([inputs]) print(output) else: output = [\"None\"] print(\"\\nUser didn't provided", "print(\"\\nAccuracy 
of the Model: \"+str(accuracy*100)) if pipe: pickle.dump(pipe,open('model.pkl','wb')) # store", "import os, ast import pandas as pd from sklearn.svm import", "the Model: \"+str(accuracy*100)) if pipe: pickle.dump(pipe,open('model.pkl','wb')) # store the artifact", "Training Finished\") accuracy = pipe.score(x_test,y_test) print(\"\\nAccuracy of the Model: \"+str(accuracy*100))", "apply encoding on output variable x_train, x_test, y_train, y_test =", "make_pipeline import pickle def main(): # Get the dataset from", "didn't provided inputs to predict\") print(\"\\n=======================Action Completed========================\") print(f\"::set-output name=myOutput::{output[0]}\") if", "# Get the dataset from the users GitHub repository dataset_path", "print(\"\\nModel Training Finished\") accuracy = pipe.score(x_test,y_test) print(\"\\nAccuracy of the Model:", "users GitHub repository dataset_path = \"https://raw.githubusercontent.com/\" + os.environ[\"GITHUB_REPOSITORY\"] + \"/master/dataset.csv\"", "pipe.fit(x_train,y_train) #training the model print(\"\\nModel Training Finished\") accuracy = pipe.score(x_test,y_test)", "print(output) else: output = [\"None\"] print(\"\\nUser didn't provided inputs to", "predict\") print(\"\\n=======================Action Completed========================\") print(f\"::set-output name=myOutput::{output[0]}\") if __name__ == \"__main__\": main()", "ast import pandas as pd from sklearn.svm import SVC from", "x=data.iloc[:,:-1] y=data.iloc[:,-1] column_trans = make_column_transformer((OneHotEncoder(),[-1]),remainder='passthrough') # apply encoding on output", "GitHub repository dataset_path = \"https://raw.githubusercontent.com/\" + os.environ[\"GITHUB_REPOSITORY\"] + \"/master/dataset.csv\" data", "'zeroinputs': inputs = ast.literal_eval(os.environ[\"INPUT_MYINPUT\"]) print(\"\\nThe Predicted Ouput is :\") output", "column_trans = make_column_transformer((OneHotEncoder(),[-1]),remainder='passthrough') # apply encoding on output variable x_train,", "x_train, x_test, y_train, y_test = train_test_split(x, y, test_size = 0.2,", "= ast.literal_eval(os.environ[\"INPUT_MYINPUT\"]) print(\"\\nThe Predicted Ouput is :\") output = pipe.predict([inputs])", "is :\") output = pipe.predict([inputs]) print(output) else: output = [\"None\"]", "on output variable x_train, x_test, y_train, y_test = train_test_split(x, y,", "== 'zeroinputs': inputs = ast.literal_eval(os.environ[\"INPUT_MYINPUT\"]) print(\"\\nThe Predicted Ouput is :\")", "store the artifact in docker container if not os.environ[\"INPUT_MYINPUT\"] ==", "train_test_split from sklearn.preprocessing import OneHotEncoder from sklearn.compose import make_column_transformer from", "not os.environ[\"INPUT_MYINPUT\"] == 'zeroinputs': inputs = ast.literal_eval(os.environ[\"INPUT_MYINPUT\"]) print(\"\\nThe Predicted Ouput", "import pandas as pd from sklearn.svm import SVC from sklearn.model_selection", "+ \"/master/dataset.csv\" data = pd.read_csv(dataset_path) print() print(data.describe()) x=data.iloc[:,:-1] y=data.iloc[:,-1] column_trans", "output variable x_train, x_test, y_train, y_test = train_test_split(x, y, test_size", "os.environ[\"GITHUB_REPOSITORY\"] + \"/master/dataset.csv\" data = pd.read_csv(dataset_path) print() print(data.describe()) x=data.iloc[:,:-1] y=data.iloc[:,-1]", "<reponame>AmirValeev/auto-ml-classifier<gh_stars>0 import os, ast import pandas as pd from sklearn.svm", "make_pipeline(column_trans,SVC()) pipe.fit(x_train,y_train) #training the model print(\"\\nModel Training Finished\") accuracy 
=", "dataset_path = \"https://raw.githubusercontent.com/\" + os.environ[\"GITHUB_REPOSITORY\"] + \"/master/dataset.csv\" data = pd.read_csv(dataset_path)", "test_size = 0.2, random_state=0) #define a pipeline pipe = make_pipeline(column_trans,SVC())", "pickle def main(): # Get the dataset from the users", "from sklearn.pipeline import make_pipeline import pickle def main(): # Get", "make_column_transformer from sklearn.pipeline import make_pipeline import pickle def main(): #", "y_test = train_test_split(x, y, test_size = 0.2, random_state=0) #define a", "in docker container if not os.environ[\"INPUT_MYINPUT\"] == 'zeroinputs': inputs =", "make_column_transformer((OneHotEncoder(),[-1]),remainder='passthrough') # apply encoding on output variable x_train, x_test, y_train,", "pickle.dump(pipe,open('model.pkl','wb')) # store the artifact in docker container if not" ]
[ "= header_value.split(\" \") if len(parts) != 2 or parts[0].lower() !=", "Attempts to parse the given header value as a Base64-encoded", "a Base64-encoded Basic auth header. \"\"\" if not header_value: return", "\"basic\": return None try: basic_parts = base64.b64decode(parts[1]).split(\":\", 1) if len(basic_parts)", "1) if len(basic_parts) != 2: return None return basic_parts except", "header value as a Base64-encoded Basic auth header. \"\"\" if", "len(parts) != 2 or parts[0].lower() != \"basic\": return None try:", "header_value.split(\" \") if len(parts) != 2 or parts[0].lower() != \"basic\":", "= base64.b64decode(parts[1]).split(\":\", 1) if len(basic_parts) != 2: return None return", "parse the given header value as a Base64-encoded Basic auth", "if len(parts) != 2 or parts[0].lower() != \"basic\": return None", "None try: basic_parts = base64.b64decode(parts[1]).split(\":\", 1) if len(basic_parts) != 2:", "Basic auth header. \"\"\" if not header_value: return None parts", "if len(basic_parts) != 2: return None return basic_parts except ValueError:", "<gh_stars>1000+ import base64 def parse_basic_auth(header_value): \"\"\" Attempts to parse the", "try: basic_parts = base64.b64decode(parts[1]).split(\":\", 1) if len(basic_parts) != 2: return", "basic_parts = base64.b64decode(parts[1]).split(\":\", 1) if len(basic_parts) != 2: return None", "base64.b64decode(parts[1]).split(\":\", 1) if len(basic_parts) != 2: return None return basic_parts", "\") if len(parts) != 2 or parts[0].lower() != \"basic\": return", "if not header_value: return None parts = header_value.split(\" \") if", "def parse_basic_auth(header_value): \"\"\" Attempts to parse the given header value", "return None parts = header_value.split(\" \") if len(parts) != 2", "header. \"\"\" if not header_value: return None parts = header_value.split(\"", "parts[0].lower() != \"basic\": return None try: basic_parts = base64.b64decode(parts[1]).split(\":\", 1)", "header_value: return None parts = header_value.split(\" \") if len(parts) !=", "parse_basic_auth(header_value): \"\"\" Attempts to parse the given header value as", "\"\"\" Attempts to parse the given header value as a", "the given header value as a Base64-encoded Basic auth header.", "!= \"basic\": return None try: basic_parts = base64.b64decode(parts[1]).split(\":\", 1) if", "to parse the given header value as a Base64-encoded Basic", "!= 2 or parts[0].lower() != \"basic\": return None try: basic_parts", "base64 def parse_basic_auth(header_value): \"\"\" Attempts to parse the given header", "!= 2: return None return basic_parts except ValueError: return None", "return None try: basic_parts = base64.b64decode(parts[1]).split(\":\", 1) if len(basic_parts) !=", "as a Base64-encoded Basic auth header. \"\"\" if not header_value:", "\"\"\" if not header_value: return None parts = header_value.split(\" \")", "parts = header_value.split(\" \") if len(parts) != 2 or parts[0].lower()", "2 or parts[0].lower() != \"basic\": return None try: basic_parts =", "value as a Base64-encoded Basic auth header. \"\"\" if not", "given header value as a Base64-encoded Basic auth header. \"\"\"", "None parts = header_value.split(\" \") if len(parts) != 2 or", "len(basic_parts) != 2: return None return basic_parts except ValueError: return", "not header_value: return None parts = header_value.split(\" \") if len(parts)", "auth header. 
\"\"\" if not header_value: return None parts =", "or parts[0].lower() != \"basic\": return None try: basic_parts = base64.b64decode(parts[1]).split(\":\",", "import base64 def parse_basic_auth(header_value): \"\"\" Attempts to parse the given", "Base64-encoded Basic auth header. \"\"\" if not header_value: return None" ]
[ "is the event object. The `user` kwarg contains the user", "available. \"\"\") updated = _signals.signal('updated', \"\"\" Called when basic data", "object. The `user` kwarg contains the user performing the deletion", "the session. \"\"\") session_block_deleted = _signals.signal('session-block-deleted', \"\"\" Called when a", "Called when a session is updated. The *sender* is the", "passed in the `old_type` kwarg. \"\"\") moved = _signals.signal('moved', \"\"\"", "the `Event` object of the old event, the new event", "updated = _signals.signal('updated', \"\"\" Called when basic data of an", "_signals.signal('session-updated', \"\"\" Called when a session is updated. The *sender*", "the # LICENSE file for more details. from indico.core.signals.event import", "the timetable view. \"\"\") get_log_renderers = _signals.signal('get-log-renderers', \"\"\" Expected to", "Expected to return `EventLogRenderer` classes. \"\"\") get_feature_definitions = _signals.signal('get-feature-definitions', \"\"\"", "with ``(old, new)`` tuples for each change. Note than the", "Called when a session is deleted. The *sender* is the", "is the session block. This signal is called before the", "will be used to update the original representation (fields to", "the `changes` kwarg, with ``(old, new)`` tuples for each change.", "the order or some data on the person link). \"\"\")", "under the terms of the MIT License; see the #", "something changed (usually the order or some data on the", "when an event is moved to a different category. The", "2020 CERN # # Indico is free software; you can", "of an event is updated. The *sender* is the event.", "so that plugins can add their own fields. The *sender*", "to be added to the event side menu. A single", "\"\"\" Called when an event is deleted. The *sender* is", "The `sender` is the new Event. \"\"\") session_updated = _signals.signal('session-updated',", "object of the old event, the new event is passed", "old event, the new event is passed in the `new_event`", "_signals.signal('timetable-buttons', \"\"\" Expected to return a list of tuples ('button_name',", "must be yielded. \"\"\") deleted = _signals.signal('deleted', \"\"\" Called when", "order or some data on the person link). \"\"\") cloned", "_signals.signal('metadata-postprocess', \"\"\" Called right after a dict-like representation of an", "you can redistribute it and/or # modify it under the", "the event. A dict of changes is passed in the", "should be assumed that something changed (usually the order or", "_signals sidemenu = _signals.signal('sidemenu', \"\"\" Expected to return ``MenuEntryData`` objects", "dict that will be used to update the original representation", "be used to update the original representation (fields to add", "Expected to return a list of tuples ('button_name', 'js-call-class'). Called", "`old` and `new` being the same lists for technical reasons.", "added to the event side menu. A single entry can", "the event object. The metadata is passed in the `data`", "= _signals.signal('cloned', \"\"\" Called when an event is cloned. The", "own fields. The *sender* is a string parameter specifying the", "updated. The *sender* is the event. A dict of changes", "should return a dict that will be used to update", "is moved to a different category. The `sender` is the", "on the person link). \"\"\") cloned = _signals.signal('cloned', \"\"\" Called", "_signals.signal('type-changed', \"\"\" Called when the type of an event is", "Copyright (C) 2002 - 2020 CERN # # Indico is", "*sender* is the session. 
\"\"\") session_block_deleted = _signals.signal('session-block-deleted', \"\"\" Called", "session_updated = _signals.signal('session-updated', \"\"\" Called when a session is updated.", "the old type is passed in the `old_type` kwarg. \"\"\")", "_signals.signal('sidemenu', \"\"\" Expected to return ``MenuEntryData`` objects to be added", "'js-call-class'). Called when building the timetable view. \"\"\") get_log_renderers =", "right after a dict-like representation of an event is created,", "event, the new event is passed in the `new_event` kwarg.", "that will be used to update the original representation (fields", "basic data of an event is updated. The *sender* is", "The *sender* is a string parameter specifying the source of", "menu. A single entry can be returned directly, multiple entries", "the old event, the new event is passed in the", "*sender* is a string parameter specifying the source of the", "metadata. The *event* kwarg contains the event object. The metadata", "= _signals.signal('metadata-postprocess', \"\"\" Called right after a dict-like representation of", "(C) 2002 - 2020 CERN # # Indico is free", "\"\"\" Called when a session is deleted. The *sender* is", "metadata is passed in the `data` kwarg. The signal should", "when an event is cloned. The *sender* is the `Event`", "when the type of an event is changed. The `sender`", "reasons. If the key is present, it should be assumed", "LICENSE file for more details. from indico.core.signals.event import _signals sidemenu", "the `data` kwarg. The signal should return a dict that", "session. \"\"\") session_block_deleted = _signals.signal('session-block-deleted', \"\"\" Called when a session", "kwarg. \"\"\") type_changed = _signals.signal('type-changed', \"\"\" Called when the type", "technical reasons. If the key is present, it should be", "when building the timetable view. \"\"\") get_log_renderers = _signals.signal('get-log-renderers', \"\"\"", "be returned directly, multiple entries must be yielded. \"\"\") deleted", "event is changed. The `sender` is the event, the old", "timetable_buttons = _signals.signal('timetable-buttons', \"\"\" Expected to return a list of", "different category. The `sender` is the event, the old category", "to the event side menu. A single entry can be", "parameter specifying the source of the metadata. The *event* kwarg", "old category is in the `old_parent` kwarg. \"\"\") created =", "created = _signals.signal('created', \"\"\" Called when a new event is", "is deleted. The *sender* is the session block. This signal", "changed. The `sender` is the event, the old type is", "Called when basic data of an event is updated. The", "`Event` object of the old event, the new event is", "is changed. The `sender` is the event, the old type", "Called when a new event is created. The `sender` is", "it should be assumed that something changed (usually the order", "building the timetable view. \"\"\") get_log_renderers = _signals.signal('get-log-renderers', \"\"\" Expected", "\"\"\" Called when a new event is created. The `sender`", "yielded. \"\"\") deleted = _signals.signal('deleted', \"\"\" Called when an event", "present, it should be assumed that something changed (usually the", "each change. Note than the `person_links` change may happen with", "for more details. from indico.core.signals.event import _signals sidemenu = _signals.signal('sidemenu',", "is passed in the `new_event` kwarg. 
\"\"\") type_changed = _signals.signal('type-changed',", "\"\"\" Expected to return a list of tuples ('button_name', 'js-call-class').", "an event is created, so that plugins can add their", "\"\"\" Expected to return `EventLogRenderer` classes. \"\"\") get_feature_definitions = _signals.signal('get-feature-definitions',", "changed (usually the order or some data on the person", "is the event, the old category is in the `old_parent`", "session block. This signal is called before the ``db.session.delete()`` on", "tuples for each change. Note than the `person_links` change may", "an event is moved to a different category. The `sender`", "a dict that will be used to update the original", "the block is executed. \"\"\") timetable_buttons = _signals.signal('timetable-buttons', \"\"\" Expected", "The metadata is passed in the `data` kwarg. The signal", "change may happen with `old` and `new` being the same", "\"\"\") get_feature_definitions = _signals.signal('get-feature-definitions', \"\"\" Expected to return `EventFeature` subclasses.", "is passed in the `old_type` kwarg. \"\"\") moved = _signals.signal('moved',", "# This file is part of Indico. # Copyright (C)", "return `EventLogRenderer` classes. \"\"\") get_feature_definitions = _signals.signal('get-feature-definitions', \"\"\" Expected to", "= _signals.signal('get-feature-definitions', \"\"\" Expected to return `EventFeature` subclasses. \"\"\") metadata_postprocess", "entries must be yielded. \"\"\") deleted = _signals.signal('deleted', \"\"\" Called", "= _signals.signal('updated', \"\"\" Called when basic data of an event", "signal should return a dict that will be used to", "a new event is created. The `sender` is the new", "session_deleted = _signals.signal('session-deleted', \"\"\" Called when a session is deleted.", "is the event, the old type is passed in the", "in the `old_parent` kwarg. \"\"\") created = _signals.signal('created', \"\"\" Called", "\"\"\" Called when a session is updated. The *sender* is", "link). \"\"\") cloned = _signals.signal('cloned', \"\"\" Called when an event", "some data on the person link). \"\"\") cloned = _signals.signal('cloned',", "_signals.signal('cloned', \"\"\" Called when an event is cloned. The *sender*", "= _signals.signal('deleted', \"\"\" Called when an event is deleted. The", "`data` kwarg. The signal should return a dict that will", "passed in the `data` kwarg. The signal should return a", "_signals.signal('updated', \"\"\" Called when basic data of an event is", "Called when building the timetable view. \"\"\") get_log_renderers = _signals.signal('get-log-renderers',", "it and/or # modify it under the terms of the", "\"\"\" Called when an event is moved to a different", "block. This signal is called before the ``db.session.delete()`` on the", "category. The `sender` is the event, the old category is", "# modify it under the terms of the MIT License;", "*sender* is the event object. The `user` kwarg contains the", "is created. The `sender` is the new Event. \"\"\") session_updated", "The *sender* is the event. A dict of changes is", "is the event. A dict of changes is passed in", "of an event is created, so that plugins can add", "of tuples ('button_name', 'js-call-class'). Called when building the timetable view.", "the event object. The `user` kwarg contains the user performing", "MIT License; see the # LICENSE file for more details.", "to return `EventFeature` subclasses. 
\"\"\") metadata_postprocess = _signals.signal('metadata-postprocess', \"\"\" Called", "\"\"\" Called when a session block is deleted. The *sender*", "The *sender* is the event object. The `user` kwarg contains", "kwarg. \"\"\") created = _signals.signal('created', \"\"\" Called when a new", "dict of changes is passed in the `changes` kwarg, with", "get_feature_definitions = _signals.signal('get-feature-definitions', \"\"\" Expected to return `EventFeature` subclasses. \"\"\")", "new Event. \"\"\") session_updated = _signals.signal('session-updated', \"\"\" Called when a", "`EventLogRenderer` classes. \"\"\") get_feature_definitions = _signals.signal('get-feature-definitions', \"\"\" Expected to return", "event is passed in the `new_event` kwarg. \"\"\") type_changed =", "part of Indico. # Copyright (C) 2002 - 2020 CERN", "The signal should return a dict that will be used", "= _signals.signal('sidemenu', \"\"\" Expected to return ``MenuEntryData`` objects to be", "data on the person link). \"\"\") cloned = _signals.signal('cloned', \"\"\"", "`sender` is the new Event. \"\"\") session_updated = _signals.signal('session-updated', \"\"\"", "the terms of the MIT License; see the # LICENSE", "the user performing the deletion if available. \"\"\") updated =", "software; you can redistribute it and/or # modify it under", "is in the `old_parent` kwarg. \"\"\") created = _signals.signal('created', \"\"\"", "change. Note than the `person_links` change may happen with `old`", "to return ``MenuEntryData`` objects to be added to the event", "file for more details. from indico.core.signals.event import _signals sidemenu =", "The *sender* is the session. \"\"\") session_deleted = _signals.signal('session-deleted', \"\"\"", "subclasses. \"\"\") metadata_postprocess = _signals.signal('metadata-postprocess', \"\"\" Called right after a", "the same lists for technical reasons. If the key is", "when a new event is created. The `sender` is the", "If the key is present, it should be assumed that", "\"\"\") cloned = _signals.signal('cloned', \"\"\" Called when an event is", "for technical reasons. If the key is present, it should", "when a session is updated. The *sender* is the session.", "is part of Indico. # Copyright (C) 2002 - 2020", "is executed. \"\"\") timetable_buttons = _signals.signal('timetable-buttons', \"\"\" Expected to return", "\"\"\") updated = _signals.signal('updated', \"\"\" Called when basic data of", "update the original representation (fields to add or override). \"\"\")", "of the MIT License; see the # LICENSE file for", "# Indico is free software; you can redistribute it and/or", "tuples ('button_name', 'js-call-class'). Called when building the timetable view. \"\"\")", "changes is passed in the `changes` kwarg, with ``(old, new)``", "\"\"\") created = _signals.signal('created', \"\"\" Called when a new event", "user performing the deletion if available. \"\"\") updated = _signals.signal('updated',", "to a different category. The `sender` is the event, the", "an event is deleted. The *sender* is the event object.", "A single entry can be returned directly, multiple entries must", "redistribute it and/or # modify it under the terms of", "side menu. A single entry can be returned directly, multiple", "in the `changes` kwarg, with ``(old, new)`` tuples for each", "representation of an event is created, so that plugins can", "This file is part of Indico. 
# Copyright (C) 2002", "it under the terms of the MIT License; see the", "than the `person_links` change may happen with `old` and `new`", "event side menu. A single entry can be returned directly,", "= _signals.signal('moved', \"\"\" Called when an event is moved to", "when an event is deleted. The *sender* is the event", "is a string parameter specifying the source of the metadata.", "_signals.signal('get-log-renderers', \"\"\" Expected to return `EventLogRenderer` classes. \"\"\") get_feature_definitions =", "the old category is in the `old_parent` kwarg. \"\"\") created", "\"\"\") type_changed = _signals.signal('type-changed', \"\"\" Called when the type of", "when a session is deleted. The *sender* is the session.", "The *sender* is the `Event` object of the old event,", "\"\"\") get_log_renderers = _signals.signal('get-log-renderers', \"\"\" Expected to return `EventLogRenderer` classes.", "Called when an event is moved to a different category.", "timetable view. \"\"\") get_log_renderers = _signals.signal('get-log-renderers', \"\"\" Expected to return", "\"\"\") session_block_deleted = _signals.signal('session-block-deleted', \"\"\" Called when a session block", "session. \"\"\") session_deleted = _signals.signal('session-deleted', \"\"\" Called when a session", "that plugins can add their own fields. The *sender* is", "deleted. The *sender* is the event object. The `user` kwarg", "of changes is passed in the `changes` kwarg, with ``(old,", "block is deleted. The *sender* is the session block. This", "is updated. The *sender* is the session. \"\"\") session_deleted =", "the key is present, it should be assumed that something", "the session. \"\"\") session_deleted = _signals.signal('session-deleted', \"\"\" Called when a", "# LICENSE file for more details. from indico.core.signals.event import _signals", "the `new_event` kwarg. \"\"\") type_changed = _signals.signal('type-changed', \"\"\" Called when", "the `person_links` change may happen with `old` and `new` being", "a string parameter specifying the source of the metadata. The", "metadata_postprocess = _signals.signal('metadata-postprocess', \"\"\" Called right after a dict-like representation", "_signals.signal('moved', \"\"\" Called when an event is moved to a", "the session block. This signal is called before the ``db.session.delete()``", "for each change. Note than the `person_links` change may happen", "import _signals sidemenu = _signals.signal('sidemenu', \"\"\" Expected to return ``MenuEntryData``", "list of tuples ('button_name', 'js-call-class'). Called when building the timetable", "plugins can add their own fields. The *sender* is a", "`old_parent` kwarg. \"\"\") created = _signals.signal('created', \"\"\" Called when a", "called before the ``db.session.delete()`` on the block is executed. \"\"\")", "block is executed. \"\"\") timetable_buttons = _signals.signal('timetable-buttons', \"\"\" Expected to", "if available. \"\"\") updated = _signals.signal('updated', \"\"\" Called when basic", "*sender* is the session block. This signal is called before", "\"\"\" Called when an event is cloned. The *sender* is", "the person link). \"\"\") cloned = _signals.signal('cloned', \"\"\" Called when", "specifying the source of the metadata. The *event* kwarg contains", "moved to a different category. The `sender` is the event,", "session is deleted. The *sender* is the session. \"\"\") session_block_deleted", "a different category. 
The `sender` is the event, the old", "session_block_deleted = _signals.signal('session-block-deleted', \"\"\" Called when a session block is", "created. The `sender` is the new Event. \"\"\") session_updated =", "event is deleted. The *sender* is the event object. The", "Note than the `person_links` change may happen with `old` and", "= _signals.signal('type-changed', \"\"\" Called when the type of an event", "kwarg contains the user performing the deletion if available. \"\"\")", "is deleted. The *sender* is the event object. The `user`", "executed. \"\"\") timetable_buttons = _signals.signal('timetable-buttons', \"\"\" Expected to return a", "free software; you can redistribute it and/or # modify it", "used to update the original representation (fields to add or", "a session block is deleted. The *sender* is the session", "`user` kwarg contains the user performing the deletion if available.", "is updated. The *sender* is the event. A dict of", "is free software; you can redistribute it and/or # modify", "*sender* is the session. \"\"\") session_deleted = _signals.signal('session-deleted', \"\"\" Called", "moved = _signals.signal('moved', \"\"\" Called when an event is moved", "new)`` tuples for each change. Note than the `person_links` change", "the new Event. \"\"\") session_updated = _signals.signal('session-updated', \"\"\" Called when", "return a list of tuples ('button_name', 'js-call-class'). Called when building", "passed in the `changes` kwarg, with ``(old, new)`` tuples for", "kwarg contains the event object. The metadata is passed in", "the ``db.session.delete()`` on the block is executed. \"\"\") timetable_buttons =", "in the `old_type` kwarg. \"\"\") moved = _signals.signal('moved', \"\"\" Called", "after a dict-like representation of an event is created, so", "performing the deletion if available. \"\"\") updated = _signals.signal('updated', \"\"\"", "cloned = _signals.signal('cloned', \"\"\" Called when an event is cloned.", "`sender` is the event, the old type is passed in", "# # Indico is free software; you can redistribute it", "session is updated. The *sender* is the session. \"\"\") session_deleted", "# Copyright (C) 2002 - 2020 CERN # # Indico", "can add their own fields. The *sender* is a string", "indico.core.signals.event import _signals sidemenu = _signals.signal('sidemenu', \"\"\" Expected to return", "event is created. The `sender` is the new Event. \"\"\")", "to update the original representation (fields to add or override).", "The *event* kwarg contains the event object. The metadata is", "deleted = _signals.signal('deleted', \"\"\" Called when an event is deleted.", "a list of tuples ('button_name', 'js-call-class'). Called when building the", "Called right after a dict-like representation of an event is", "passed in the `new_event` kwarg. \"\"\") type_changed = _signals.signal('type-changed', \"\"\"", "A dict of changes is passed in the `changes` kwarg,", "`sender` is the event, the old category is in the", "\"\"\") timetable_buttons = _signals.signal('timetable-buttons', \"\"\" Expected to return a list", "(usually the order or some data on the person link).", "2002 - 2020 CERN # # Indico is free software;", "deleted. The *sender* is the session block. This signal is", "= _signals.signal('get-log-renderers', \"\"\" Expected to return `EventLogRenderer` classes. \"\"\") get_feature_definitions", "is cloned. The *sender* is the `Event` object of the", "is the session. 
\"\"\") session_block_deleted = _signals.signal('session-block-deleted', \"\"\" Called when", "Indico is free software; you can redistribute it and/or #", "type_changed = _signals.signal('type-changed', \"\"\" Called when the type of an", "is deleted. The *sender* is the session. \"\"\") session_block_deleted =", "a session is deleted. The *sender* is the session. \"\"\")", "``(old, new)`` tuples for each change. Note than the `person_links`", "new event is passed in the `new_event` kwarg. \"\"\") type_changed", "an event is cloned. The *sender* is the `Event` object", "key is present, it should be assumed that something changed", "assumed that something changed (usually the order or some data", "\"\"\") metadata_postprocess = _signals.signal('metadata-postprocess', \"\"\" Called right after a dict-like", "their own fields. The *sender* is a string parameter specifying", "the event, the old category is in the `old_parent` kwarg.", "*sender* is the event. A dict of changes is passed", "\"\"\") deleted = _signals.signal('deleted', \"\"\" Called when an event is", "is passed in the `data` kwarg. The signal should return", "new event is created. The `sender` is the new Event.", "`new_event` kwarg. \"\"\") type_changed = _signals.signal('type-changed', \"\"\" Called when the", "= _signals.signal('timetable-buttons', \"\"\" Expected to return a list of tuples", "a dict-like representation of an event is created, so that", "Expected to return `EventFeature` subclasses. \"\"\") metadata_postprocess = _signals.signal('metadata-postprocess', \"\"\"", "with `old` and `new` being the same lists for technical", "The *sender* is the session block. This signal is called", "and `new` being the same lists for technical reasons. If", "an event is changed. The `sender` is the event, the", "details. from indico.core.signals.event import _signals sidemenu = _signals.signal('sidemenu', \"\"\" Expected", "directly, multiple entries must be yielded. \"\"\") deleted = _signals.signal('deleted',", "`old_type` kwarg. \"\"\") moved = _signals.signal('moved', \"\"\" Called when an", "('button_name', 'js-call-class'). Called when building the timetable view. \"\"\") get_log_renderers", "deleted. The *sender* is the session. \"\"\") session_block_deleted = _signals.signal('session-block-deleted',", "be added to the event side menu. A single entry", "event, the old category is in the `old_parent` kwarg. \"\"\")", "The `sender` is the event, the old category is in", "that something changed (usually the order or some data on", "= _signals.signal('created', \"\"\" Called when a new event is created.", "is the session. \"\"\") session_deleted = _signals.signal('session-deleted', \"\"\" Called when", "and/or # modify it under the terms of the MIT", "kwarg. \"\"\") moved = _signals.signal('moved', \"\"\" Called when an event", "file is part of Indico. # Copyright (C) 2002 -", "the metadata. The *event* kwarg contains the event object. The", "type of an event is changed. The `sender` is the", "or some data on the person link). \"\"\") cloned =", "source of the metadata. The *event* kwarg contains the event", "may happen with `old` and `new` being the same lists", "updated. The *sender* is the session. \"\"\") session_deleted = _signals.signal('session-deleted',", "terms of the MIT License; see the # LICENSE file", "`person_links` change may happen with `old` and `new` being the", "created, so that plugins can add their own fields. The", "can be returned directly, multiple entries must be yielded. \"\"\")", "event object. 
The `user` kwarg contains the user performing the", "The *sender* is the session. \"\"\") session_block_deleted = _signals.signal('session-block-deleted', \"\"\"", "This signal is called before the ``db.session.delete()`` on the block", "in the `data` kwarg. The signal should return a dict", "the source of the metadata. The *event* kwarg contains the", "= _signals.signal('session-updated', \"\"\" Called when a session is updated. The", "Called when the type of an event is changed. The", "is present, it should be assumed that something changed (usually", "old type is passed in the `old_type` kwarg. \"\"\") moved", "= _signals.signal('session-deleted', \"\"\" Called when a session is deleted. The", "License; see the # LICENSE file for more details. from", "The `sender` is the event, the old type is passed", "kwarg, with ``(old, new)`` tuples for each change. Note than", "returned directly, multiple entries must be yielded. \"\"\") deleted =", "return ``MenuEntryData`` objects to be added to the event side", "object. The metadata is passed in the `data` kwarg. The", "be yielded. \"\"\") deleted = _signals.signal('deleted', \"\"\" Called when an", "contains the user performing the deletion if available. \"\"\") updated", "Called when an event is cloned. The *sender* is the", "``db.session.delete()`` on the block is executed. \"\"\") timetable_buttons = _signals.signal('timetable-buttons',", "category is in the `old_parent` kwarg. \"\"\") created = _signals.signal('created',", "return `EventFeature` subclasses. \"\"\") metadata_postprocess = _signals.signal('metadata-postprocess', \"\"\" Called right", "_signals.signal('deleted', \"\"\" Called when an event is deleted. The *sender*", "is called before the ``db.session.delete()`` on the block is executed.", "event is cloned. The *sender* is the `Event` object of", "classes. \"\"\") get_feature_definitions = _signals.signal('get-feature-definitions', \"\"\" Expected to return `EventFeature`", "the `old_parent` kwarg. \"\"\") created = _signals.signal('created', \"\"\" Called when", "event. A dict of changes is passed in the `changes`", "of the old event, the new event is passed in", "objects to be added to the event side menu. A", "to return `EventLogRenderer` classes. \"\"\") get_feature_definitions = _signals.signal('get-feature-definitions', \"\"\" Expected", "`EventFeature` subclasses. \"\"\") metadata_postprocess = _signals.signal('metadata-postprocess', \"\"\" Called right after", "multiple entries must be yielded. \"\"\") deleted = _signals.signal('deleted', \"\"\"", "of Indico. # Copyright (C) 2002 - 2020 CERN #", "lists for technical reasons. If the key is present, it", "add their own fields. The *sender* is a string parameter", "more details. from indico.core.signals.event import _signals sidemenu = _signals.signal('sidemenu', \"\"\"", "sidemenu = _signals.signal('sidemenu', \"\"\" Expected to return ``MenuEntryData`` objects to", "the deletion if available. \"\"\") updated = _signals.signal('updated', \"\"\" Called", "_signals.signal('get-feature-definitions', \"\"\" Expected to return `EventFeature` subclasses. \"\"\") metadata_postprocess =", "\"\"\" Called when basic data of an event is updated.", "the MIT License; see the # LICENSE file for more", "get_log_renderers = _signals.signal('get-log-renderers', \"\"\" Expected to return `EventLogRenderer` classes. \"\"\")", "kwarg. The signal should return a dict that will be", "<gh_stars>0 # This file is part of Indico. 
# Copyright", "_signals.signal('session-block-deleted', \"\"\" Called when a session block is deleted. The", "before the ``db.session.delete()`` on the block is executed. \"\"\") timetable_buttons", "the type of an event is changed. The `sender` is", "person link). \"\"\") cloned = _signals.signal('cloned', \"\"\" Called when an", "= _signals.signal('session-block-deleted', \"\"\" Called when a session block is deleted.", "happen with `old` and `new` being the same lists for", "`changes` kwarg, with ``(old, new)`` tuples for each change. Note", "signal is called before the ``db.session.delete()`` on the block is", "contains the event object. The metadata is passed in the", "event is moved to a different category. The `sender` is", "modify it under the terms of the MIT License; see", "\"\"\" Expected to return ``MenuEntryData`` objects to be added to", "string parameter specifying the source of the metadata. The *event*", "can redistribute it and/or # modify it under the terms", "\"\"\") moved = _signals.signal('moved', \"\"\" Called when an event is", "data of an event is updated. The *sender* is the", "same lists for technical reasons. If the key is present,", "\"\"\") session_deleted = _signals.signal('session-deleted', \"\"\" Called when a session is", "return a dict that will be used to update the", "is the new Event. \"\"\") session_updated = _signals.signal('session-updated', \"\"\" Called", "from indico.core.signals.event import _signals sidemenu = _signals.signal('sidemenu', \"\"\" Expected to", "\"\"\" Called right after a dict-like representation of an event", "view. \"\"\") get_log_renderers = _signals.signal('get-log-renderers', \"\"\" Expected to return `EventLogRenderer`", "event is updated. The *sender* is the event. A dict", "- 2020 CERN # # Indico is free software; you", "is passed in the `changes` kwarg, with ``(old, new)`` tuples", "`new` being the same lists for technical reasons. If the", "event is created, so that plugins can add their own", "see the # LICENSE file for more details. from indico.core.signals.event", "fields. The *sender* is a string parameter specifying the source", "_signals.signal('session-deleted', \"\"\" Called when a session is deleted. The *sender*", "on the block is executed. \"\"\") timetable_buttons = _signals.signal('timetable-buttons', \"\"\"", "\"\"\" Expected to return `EventFeature` subclasses. \"\"\") metadata_postprocess = _signals.signal('metadata-postprocess',", "a session is updated. The *sender* is the session. \"\"\")", "event object. The metadata is passed in the `data` kwarg.", "is created, so that plugins can add their own fields.", "Expected to return ``MenuEntryData`` objects to be added to the", "type is passed in the `old_type` kwarg. \"\"\") moved =", "session block is deleted. The *sender* is the session block.", "is the `Event` object of the old event, the new", "\"\"\" Called when the type of an event is changed.", "cloned. The *sender* is the `Event` object of the old", "Called when an event is deleted. The *sender* is the", "be assumed that something changed (usually the order or some", "dict-like representation of an event is created, so that plugins", "entry can be returned directly, multiple entries must be yielded.", "being the same lists for technical reasons. If the key", "in the `new_event` kwarg. \"\"\") type_changed = _signals.signal('type-changed', \"\"\" Called", "to return a list of tuples ('button_name', 'js-call-class'). 
Called when", "CERN # # Indico is free software; you can redistribute", "the `old_type` kwarg. \"\"\") moved = _signals.signal('moved', \"\"\" Called when", "_signals.signal('created', \"\"\" Called when a new event is created. The", "deletion if available. \"\"\") updated = _signals.signal('updated', \"\"\" Called when", "of the metadata. The *event* kwarg contains the event object.", "*event* kwarg contains the event object. The metadata is passed", "Event. \"\"\") session_updated = _signals.signal('session-updated', \"\"\" Called when a session", "the new event is passed in the `new_event` kwarg. \"\"\")", "Indico. # Copyright (C) 2002 - 2020 CERN # #", "of an event is changed. The `sender` is the event,", "Called when a session block is deleted. The *sender* is", "The `user` kwarg contains the user performing the deletion if", "\"\"\") session_updated = _signals.signal('session-updated', \"\"\" Called when a session is", "the event side menu. A single entry can be returned", "an event is updated. The *sender* is the event. A", "``MenuEntryData`` objects to be added to the event side menu.", "*sender* is the `Event` object of the old event, the", "event, the old type is passed in the `old_type` kwarg.", "single entry can be returned directly, multiple entries must be", "the event, the old type is passed in the `old_type`", "when a session block is deleted. The *sender* is the", "when basic data of an event is updated. The *sender*" ]
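# Usage sketch: subscribing a handler to one of the signals above. Indico
# signals are blinker signals, so receivers register with ``.connect``; in
# practice the subscription would live in a plugin module, and the handler
# body here is illustrative.
def _on_event_updated(event, changes=None, **kwargs):
    # ``changes`` maps attribute names to ``(old, new)`` tuples, as described
    # in the ``updated`` signal docstring above
    for field, (old, new) in (changes or {}).items():
        print('{}: {!r} -> {!r}'.format(field, old, new))


updated.connect(_on_event_updated)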
[ "from cinder import exception from cinder.tests.unit import fake_constants as fake", "``ScaleIODriver.delete_volume()``\"\"\" def setUp(self): \"\"\"Setup a test case environment. Creates a", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "distributed on an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR", "} def test_bad_login_and_volume(self): self.set_https_response_mode(self.RESPONSE_MODE.BadStatus) self.assertRaises(exception.VolumeBackendAPIException, self.driver.delete_volume, self.volume) def test_delete_volume(self): \"\"\"Setting", "may obtain # a copy of the License at #", "# All Rights Reserved. # # Licensed under the Apache", "# # Licensed under the Apache License, Version 2.0 (the", "EMC Corporation. # All Rights Reserved. # # Licensed under", "Test', }, 401 ), }, } def test_bad_login_and_volume(self): self.set_https_response_mode(self.RESPONSE_MODE.BadStatus) self.assertRaises(exception.VolumeBackendAPIException,", "All Rights Reserved. # # Licensed under the Apache License,", "agreed to in writing, software # distributed under the License", "self.volume_name_2x_enc: mocks.MockHTTPSResponse( { 'errorCode': 401, 'message': 'BadStatus Volume Test', },", "self.volume_name_2x_enc: self.volume.id, 'instances/Volume::{}/action/removeMappedSdc'.format( self.volume.provider_id): self.volume.provider_id, 'instances/Volume::{}/action/removeVolume'.format( self.volume.provider_id ): self.volume.provider_id, },", "Unless required by applicable law or agreed to in writing,", "six.moves import urllib from cinder import context from cinder import", "super(TestDeleteVolume, self).setUp() ctx = context.RequestContext('fake', 'fake', auth_token=True) self.volume = fake_volume.fake_volume_obj(", "), 'instances/Volume::{}/action/removeVolume'.format( self.volume.provider_id ): mocks.MockHTTPSResponse( { 'errorCode': 401, 'message': 'BadStatus", "'BadStatus Volume Test', }, 401 ), 'instances/Volume::{}/action/removeVolume'.format( self.volume.provider_id ): mocks.MockHTTPSResponse(", "(c) 2013 - 2015 EMC Corporation. # All Rights Reserved.", "distributed under the License is distributed on an \"AS IS\"", "# under the License. from six.moves import urllib from cinder", "before delete flag for tests \"\"\" self.driver.configuration.set_override( 'sio_unmap_volume_before_deletion', override=True) self.driver.delete_volume(self.volume)", "setUp(self): \"\"\"Setup a test case environment. Creates a fake volume", "fake_volume.fake_volume_obj( ctx, **{'provider_id': fake.PROVIDER_ID}) self.volume_name_2x_enc = urllib.parse.quote( urllib.parse.quote(self.driver._id_to_base64(self.volume.id)) ) self.HTTPS_MOCK_RESPONSES", "required API responses. \"\"\" super(TestDeleteVolume, self).setUp() ctx = context.RequestContext('fake', 'fake',", "= urllib.parse.quote( urllib.parse.quote(self.driver._id_to_base64(self.volume.id)) ) self.HTTPS_MOCK_RESPONSES = { self.RESPONSE_MODE.Valid: { 'types/Volume/instances/getByName::'", "License, Version 2.0 (the \"License\"); you may # not use", "CONDITIONS OF ANY KIND, either express or implied. See the", "a test case environment. 
Creates a fake volume object and", "fake volume object and sets up the required API responses.", "'instances/Volume::{}/action/removeMappedSdc'.format( self.volume.provider_id): self.volume.provider_id, 'instances/Volume::{}/action/removeVolume'.format( self.volume.provider_id ): self.volume.provider_id, }, self.RESPONSE_MODE.BadStatus: {", "obtain # a copy of the License at # #", "applicable law or agreed to in writing, software # distributed", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "Version 2.0 (the \"License\"); you may # not use this", "specific language governing permissions and limitations # under the License.", "and sets up the required API responses. \"\"\" super(TestDeleteVolume, self).setUp()", "= fake_volume.fake_volume_obj( ctx, **{'provider_id': fake.PROVIDER_ID}) self.volume_name_2x_enc = urllib.parse.quote( urllib.parse.quote(self.driver._id_to_base64(self.volume.id)) )", "'instances/Volume::{}/action/removeVolume'.format( self.volume.provider_id ): mocks.MockHTTPSResponse( { 'errorCode': 401, 'message': 'BadStatus Volume", "# not use this file except in compliance with the", "not use this file except in compliance with the License.", "OF ANY KIND, either express or implied. See the #", "Volume Test', }, 401 ), 'instances/Volume::{}/action/removeVolume'.format( self.volume.provider_id ): mocks.MockHTTPSResponse( {", "401, 'message': 'BadStatus Volume Test', }, 401 ), }, }", "writing, software # distributed under the License is distributed on", "WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express", "in writing, software # distributed under the License is distributed", "from cinder.tests.unit import fake_volume from cinder.tests.unit.volume.drivers.emc import scaleio from cinder.tests.unit.volume.drivers.emc.scaleio", "TestDeleteVolume(scaleio.TestScaleIODriver): \"\"\"Test cases for ``ScaleIODriver.delete_volume()``\"\"\" def setUp(self): \"\"\"Setup a test", "limitations # under the License. from six.moves import urllib from", "in compliance with the License. You may obtain # a", "# Licensed under the Apache License, Version 2.0 (the \"License\");", "License for the specific language governing permissions and limitations #", "urllib.parse.quote( urllib.parse.quote(self.driver._id_to_base64(self.volume.id)) ) self.HTTPS_MOCK_RESPONSES = { self.RESPONSE_MODE.Valid: { 'types/Volume/instances/getByName::' +", "self.driver.delete_volume, self.volume) def test_delete_volume(self): \"\"\"Setting the unmap volume before delete", "a fake volume object and sets up the required API", "cases for ``ScaleIODriver.delete_volume()``\"\"\" def setUp(self): \"\"\"Setup a test case environment.", "the License. You may obtain # a copy of the", "an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF", "on an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS", "use this file except in compliance with the License. 
You", "You may obtain # a copy of the License at", "self.assertRaises(exception.VolumeBackendAPIException, self.driver.delete_volume, self.volume) def test_delete_volume(self): \"\"\"Setting the unmap volume before", "self.RESPONSE_MODE.Valid: { 'types/Volume/instances/getByName::' + self.volume_name_2x_enc: self.volume.id, 'instances/Volume::{}/action/removeMappedSdc'.format( self.volume.provider_id): self.volume.provider_id, 'instances/Volume::{}/action/removeVolume'.format(", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "'types/Volume/instances/getByName::' + self.volume_name_2x_enc: mocks.MockHTTPSResponse( { 'errorCode': 401, 'message': 'BadStatus Volume", "the required API responses. \"\"\" super(TestDeleteVolume, self).setUp() ctx = context.RequestContext('fake',", "mocks class TestDeleteVolume(scaleio.TestScaleIODriver): \"\"\"Test cases for ``ScaleIODriver.delete_volume()``\"\"\" def setUp(self): \"\"\"Setup", "import exception from cinder.tests.unit import fake_constants as fake from cinder.tests.unit", "def test_delete_volume(self): \"\"\"Setting the unmap volume before delete flag for", "import scaleio from cinder.tests.unit.volume.drivers.emc.scaleio import mocks class TestDeleteVolume(scaleio.TestScaleIODriver): \"\"\"Test cases", "self.volume.provider_id): self.volume.provider_id, 'instances/Volume::{}/action/removeVolume'.format( self.volume.provider_id ): self.volume.provider_id, }, self.RESPONSE_MODE.BadStatus: { 'types/Volume/instances/getByName::'", "}, self.RESPONSE_MODE.BadStatus: { 'types/Volume/instances/getByName::' + self.volume_name_2x_enc: mocks.MockHTTPSResponse( { 'errorCode': 401,", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "import fake_volume from cinder.tests.unit.volume.drivers.emc import scaleio from cinder.tests.unit.volume.drivers.emc.scaleio import mocks", "class TestDeleteVolume(scaleio.TestScaleIODriver): \"\"\"Test cases for ``ScaleIODriver.delete_volume()``\"\"\" def setUp(self): \"\"\"Setup a", "Rights Reserved. # # Licensed under the Apache License, Version", "\"\"\"Setup a test case environment. Creates a fake volume object", "'fake', auth_token=True) self.volume = fake_volume.fake_volume_obj( ctx, **{'provider_id': fake.PROVIDER_ID}) self.volume_name_2x_enc =", "Creates a fake volume object and sets up the required", "responses. \"\"\" super(TestDeleteVolume, self).setUp() ctx = context.RequestContext('fake', 'fake', auth_token=True) self.volume", "exception from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import", "auth_token=True) self.volume = fake_volume.fake_volume_obj( ctx, **{'provider_id': fake.PROVIDER_ID}) self.volume_name_2x_enc = urllib.parse.quote(", "either express or implied. See the # License for the", "cinder.tests.unit import fake_constants as fake from cinder.tests.unit import fake_volume from", "under the License is distributed on an \"AS IS\" BASIS,", "under the License. from six.moves import urllib from cinder import", "permissions and limitations # under the License. 
from six.moves import", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "Licensed under the Apache License, Version 2.0 (the \"License\"); you", "volume before delete flag for tests \"\"\" self.driver.configuration.set_override( 'sio_unmap_volume_before_deletion', override=True)", "fake.PROVIDER_ID}) self.volume_name_2x_enc = urllib.parse.quote( urllib.parse.quote(self.driver._id_to_base64(self.volume.id)) ) self.HTTPS_MOCK_RESPONSES = { self.RESPONSE_MODE.Valid:", "may # not use this file except in compliance with", "self.set_https_response_mode(self.RESPONSE_MODE.BadStatus) self.assertRaises(exception.VolumeBackendAPIException, self.driver.delete_volume, self.volume) def test_delete_volume(self): \"\"\"Setting the unmap volume", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "volume object and sets up the required API responses. \"\"\"", "License is distributed on an \"AS IS\" BASIS, WITHOUT #", "with the License. You may obtain # a copy of", "KIND, either express or implied. See the # License for", "# License for the specific language governing permissions and limitations", "Reserved. # # Licensed under the Apache License, Version 2.0", "Volume Test', }, 401 ), }, } def test_bad_login_and_volume(self): self.set_https_response_mode(self.RESPONSE_MODE.BadStatus)", "you may # not use this file except in compliance", "governing permissions and limitations # under the License. from six.moves", "\"License\"); you may # not use this file except in", "fake_volume from cinder.tests.unit.volume.drivers.emc import scaleio from cinder.tests.unit.volume.drivers.emc.scaleio import mocks class", "scaleio from cinder.tests.unit.volume.drivers.emc.scaleio import mocks class TestDeleteVolume(scaleio.TestScaleIODriver): \"\"\"Test cases for", "401 ), 'instances/Volume::{}/action/removeVolume'.format( self.volume.provider_id ): mocks.MockHTTPSResponse( { 'errorCode': 401, 'message':", "IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND,", "'message': 'BadStatus Volume Test', }, 401 ), 'instances/Volume::{}/action/removeVolume'.format( self.volume.provider_id ):", "express or implied. See the # License for the specific", "this file except in compliance with the License. You may", "language governing permissions and limitations # under the License. from", "and limitations # under the License. from six.moves import urllib", "'errorCode': 401, 'message': 'BadStatus Volume Test', }, 401 ), 'instances/Volume::{}/action/removeVolume'.format(", "compliance with the License. 
You may obtain # a copy", "self.volume = fake_volume.fake_volume_obj( ctx, **{'provider_id': fake.PROVIDER_ID}) self.volume_name_2x_enc = urllib.parse.quote( urllib.parse.quote(self.driver._id_to_base64(self.volume.id))", "): self.volume.provider_id, }, self.RESPONSE_MODE.BadStatus: { 'types/Volume/instances/getByName::' + self.volume_name_2x_enc: mocks.MockHTTPSResponse( {", "the Apache License, Version 2.0 (the \"License\"); you may #", "self.RESPONSE_MODE.BadStatus: { 'types/Volume/instances/getByName::' + self.volume_name_2x_enc: mocks.MockHTTPSResponse( { 'errorCode': 401, 'message':", "from cinder import context from cinder import exception from cinder.tests.unit", "'instances/Volume::{}/action/removeVolume'.format( self.volume.provider_id ): self.volume.provider_id, }, self.RESPONSE_MODE.BadStatus: { 'types/Volume/instances/getByName::' + self.volume_name_2x_enc:", "from cinder.tests.unit.volume.drivers.emc.scaleio import mocks class TestDeleteVolume(scaleio.TestScaleIODriver): \"\"\"Test cases for ``ScaleIODriver.delete_volume()``\"\"\"", "environment. Creates a fake volume object and sets up the", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "from six.moves import urllib from cinder import context from cinder", "the License. from six.moves import urllib from cinder import context", "def test_bad_login_and_volume(self): self.set_https_response_mode(self.RESPONSE_MODE.BadStatus) self.assertRaises(exception.VolumeBackendAPIException, self.driver.delete_volume, self.volume) def test_delete_volume(self): \"\"\"Setting the", "# WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "See the # License for the specific language governing permissions", "{ self.RESPONSE_MODE.Valid: { 'types/Volume/instances/getByName::' + self.volume_name_2x_enc: self.volume.id, 'instances/Volume::{}/action/removeMappedSdc'.format( self.volume.provider_id): self.volume.provider_id,", "software # distributed under the License is distributed on an", "(the \"License\"); you may # not use this file except", "test case environment. Creates a fake volume object and sets", "context.RequestContext('fake', 'fake', auth_token=True) self.volume = fake_volume.fake_volume_obj( ctx, **{'provider_id': fake.PROVIDER_ID}) self.volume_name_2x_enc", "= { self.RESPONSE_MODE.Valid: { 'types/Volume/instances/getByName::' + self.volume_name_2x_enc: self.volume.id, 'instances/Volume::{}/action/removeMappedSdc'.format( self.volume.provider_id):", "Copyright (c) 2013 - 2015 EMC Corporation. 
# Copyright (c) 2013 - 2015 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from six.moves import urllib

from cinder import context
from cinder import exception
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import fake_volume
from cinder.tests.unit.volume.drivers.emc import scaleio
from cinder.tests.unit.volume.drivers.emc.scaleio import mocks


class TestDeleteVolume(scaleio.TestScaleIODriver):
    """Test cases for ``ScaleIODriver.delete_volume()``"""

    def setUp(self):
        """Setup a test case environment.

        Creates a fake volume object and sets up the required API responses.
        """
        super(TestDeleteVolume, self).setUp()
        ctx = context.RequestContext('fake', 'fake', auth_token=True)

        self.volume = fake_volume.fake_volume_obj(
            ctx, **{'provider_id': fake.PROVIDER_ID})

        self.volume_name_2x_enc = urllib.parse.quote(
            urllib.parse.quote(self.driver._id_to_base64(self.volume.id))
        )

        self.HTTPS_MOCK_RESPONSES = {
            self.RESPONSE_MODE.Valid: {
                'types/Volume/instances/getByName::' +
                self.volume_name_2x_enc: self.volume.id,
                'instances/Volume::{}/action/removeMappedSdc'.format(
                    self.volume.provider_id): self.volume.provider_id,
                'instances/Volume::{}/action/removeVolume'.format(
                    self.volume.provider_id
                ): self.volume.provider_id,
            },
            self.RESPONSE_MODE.BadStatus: {
                'types/Volume/instances/getByName::' +
                self.volume_name_2x_enc: mocks.MockHTTPSResponse(
                    {
                        'errorCode': 401,
                        'message': 'BadStatus Volume Test',
                    }, 401
                ),
                'instances/Volume::{}/action/removeVolume'.format(
                    self.volume.provider_id
                ): mocks.MockHTTPSResponse(
                    {
                        'errorCode': 401,
                        'message': 'BadStatus Volume Test',
                    }, 401
                ),
            },
        }

    def test_bad_login_and_volume(self):
        self.set_https_response_mode(self.RESPONSE_MODE.BadStatus)
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.delete_volume,
                          self.volume)

    def test_delete_volume(self):
        """Setting the unmap volume before delete flag for tests"""
        self.driver.configuration.set_override(
            'sio_unmap_volume_before_deletion',
            override=True)
        self.driver.delete_volume(self.volume)
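# --- Illustrative sketch (not part of the cinder test file above) ---
# The setUp() above keys canned HTTPS responses first by response mode and
# then by endpoint, so one test class can simulate both a healthy and a
# failing backend. A minimal, self-contained version of that lookup pattern;
# the endpoint names and payloads below are made up, not the ScaleIO API.

from enum import Enum


class ResponseMode(Enum):
    Valid = 1
    BadStatus = 2


MOCK_RESPONSES = {
    ResponseMode.Valid: {
        'instances/Volume::1/action/removeVolume': '1',
    },
    ResponseMode.BadStatus: {
        'instances/Volume::1/action/removeVolume': {'errorCode': 401},
    },
}


def fake_request(mode, endpoint):
    # Return the canned response registered for this mode and endpoint.
    return MOCK_RESPONSES[mode][endpoint]


assert fake_request(
    ResponseMode.Valid, 'instances/Volume::1/action/removeVolume') == '1'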
[ "with the tutorials. Parameter --------- cache_dir : Path-like or False,", "osmnx as ox import joblib import requests from .files import", "location = None memory = joblib.Memory(location, compress=compress, verbose=verbose, **kwargs) make_cache", "memory, cache_dir if location is None: location = appdirs.user_cache_dir('transportation_tutorials') if", "(requests, 'get'), (requests, 'post'), ) for module, func_name in make_cache:", "import appdirs import osmnx as ox import joblib import requests", "Parameter --------- cache_dir : Path-like or False, optional A path", "import load_vars, save_vars, cached, inflate_tar, download_zipfile from .data import data,", "tutorials. Parameter --------- cache_dir : Path-like or False, optional A", "in make_cache: try: func = getattr(module, f\"_{func_name}_orig\") except AttributeError: func", "save_vars, cached, inflate_tar, download_zipfile from .data import data, list_data, problematic", "import show_file from . import mapping cache_dir = None memory", "location is False: location = None memory = joblib.Memory(location, compress=compress,", "joblib import requests from .files import load_vars, save_vars, cached, inflate_tar,", "cache directory for use with the tutorials. Parameter --------- cache_dir", "make_cache = ( (ox, 'gdf_from_place'), (ox, 'graph_from_bbox'), (requests, 'get'), (requests,", ".data import data, list_data, problematic from .tools.view_code import show_file from", "as ox import joblib import requests from .files import load_vars,", "joblib.Memory(location, compress=compress, verbose=verbose, **kwargs) make_cache = ( (ox, 'gdf_from_place'), (ox,", "def set_cache_dir(location=None, compress=True, verbose=0, **kwargs): \"\"\" Set up a cache", "compress=True, verbose=0, **kwargs): \"\"\" Set up a cache directory for", "from .files import load_vars, save_vars, cached, inflate_tar, download_zipfile from .data", "None def set_cache_dir(location=None, compress=True, verbose=0, **kwargs): \"\"\" Set up a", "mapping cache_dir = None memory = None def set_cache_dir(location=None, compress=True,", "requests from .files import load_vars, save_vars, cached, inflate_tar, download_zipfile from", "-*- coding: utf-8 -*- __version__ = '1.0.2' import os import", "cache files. Set to False to disable caching. \"\"\" global", "problematic from .tools.view_code import show_file from . import mapping cache_dir", "(ox, 'gdf_from_place'), (ox, 'graph_from_bbox'), (requests, 'get'), (requests, 'post'), ) for", "from . import mapping cache_dir = None memory = None", "= None memory = joblib.Memory(location, compress=compress, verbose=verbose, **kwargs) make_cache =", "the cache files. Set to False to disable caching. \"\"\"", "import joblib import requests from .files import load_vars, save_vars, cached,", "= getattr(module, func_name) setattr(module, f\"_{func_name}_orig\", func) setattr(module, func_name, memory.cache(func)) set_cache_dir()", "path for the cache files. Set to False to disable", "for the cache files. Set to False to disable caching.", "= '1.0.2' import os import appdirs import osmnx as ox", "from .data import data, list_data, problematic from .tools.view_code import show_file", "appdirs import osmnx as ox import joblib import requests from", "False to disable caching. \"\"\" global memory, cache_dir if location", "load_vars, save_vars, cached, inflate_tar, download_zipfile from .data import data, list_data,", "caching. 
\"\"\" global memory, cache_dir if location is None: location", "verbose=verbose, **kwargs) make_cache = ( (ox, 'gdf_from_place'), (ox, 'graph_from_bbox'), (requests,", "disable caching. \"\"\" global memory, cache_dir if location is None:", "None: location = appdirs.user_cache_dir('transportation_tutorials') if location is False: location =", "coding: utf-8 -*- __version__ = '1.0.2' import os import appdirs", "'post'), ) for module, func_name in make_cache: try: func =", "up a cache directory for use with the tutorials. Parameter", "files. Set to False to disable caching. \"\"\" global memory,", "os import appdirs import osmnx as ox import joblib import", "func = getattr(module, f\"_{func_name}_orig\") except AttributeError: func = getattr(module, func_name)", "if location is False: location = None memory = joblib.Memory(location,", "False: location = None memory = joblib.Memory(location, compress=compress, verbose=verbose, **kwargs)", "make_cache: try: func = getattr(module, f\"_{func_name}_orig\") except AttributeError: func =", "cache_dir = None memory = None def set_cache_dir(location=None, compress=True, verbose=0,", "use with the tutorials. Parameter --------- cache_dir : Path-like or", "compress=compress, verbose=verbose, **kwargs) make_cache = ( (ox, 'gdf_from_place'), (ox, 'graph_from_bbox'),", "= appdirs.user_cache_dir('transportation_tutorials') if location is False: location = None memory", "import requests from .files import load_vars, save_vars, cached, inflate_tar, download_zipfile", "'gdf_from_place'), (ox, 'graph_from_bbox'), (requests, 'get'), (requests, 'post'), ) for module,", "show_file from . import mapping cache_dir = None memory =", "import osmnx as ox import joblib import requests from .files", "\"\"\" global memory, cache_dir if location is None: location =", "func_name in make_cache: try: func = getattr(module, f\"_{func_name}_orig\") except AttributeError:", "cache_dir if location is None: location = appdirs.user_cache_dir('transportation_tutorials') if location", "Path-like or False, optional A path for the cache files.", "ox import joblib import requests from .files import load_vars, save_vars,", "memory = None def set_cache_dir(location=None, compress=True, verbose=0, **kwargs): \"\"\" Set", "'get'), (requests, 'post'), ) for module, func_name in make_cache: try:", "getattr(module, f\"_{func_name}_orig\") except AttributeError: func = getattr(module, func_name) setattr(module, f\"_{func_name}_orig\",", "# -*- coding: utf-8 -*- __version__ = '1.0.2' import os", "AttributeError: func = getattr(module, func_name) setattr(module, f\"_{func_name}_orig\", func) setattr(module, func_name,", "inflate_tar, download_zipfile from .data import data, list_data, problematic from .tools.view_code", ".tools.view_code import show_file from . import mapping cache_dir = None", "'graph_from_bbox'), (requests, 'get'), (requests, 'post'), ) for module, func_name in", "or False, optional A path for the cache files. 
Set", "import data, list_data, problematic from .tools.view_code import show_file from .", "module, func_name in make_cache: try: func = getattr(module, f\"_{func_name}_orig\") except", "location = appdirs.user_cache_dir('transportation_tutorials') if location is False: location = None", "cached, inflate_tar, download_zipfile from .data import data, list_data, problematic from", "func = getattr(module, func_name) setattr(module, f\"_{func_name}_orig\", func) setattr(module, func_name, memory.cache(func))", "None memory = None def set_cache_dir(location=None, compress=True, verbose=0, **kwargs): \"\"\"", "Set to False to disable caching. \"\"\" global memory, cache_dir", "is False: location = None memory = joblib.Memory(location, compress=compress, verbose=verbose,", "f\"_{func_name}_orig\") except AttributeError: func = getattr(module, func_name) setattr(module, f\"_{func_name}_orig\", func)", "for use with the tutorials. Parameter --------- cache_dir : Path-like", "A path for the cache files. Set to False to", "None memory = joblib.Memory(location, compress=compress, verbose=verbose, **kwargs) make_cache = (", "the tutorials. Parameter --------- cache_dir : Path-like or False, optional", "location is None: location = appdirs.user_cache_dir('transportation_tutorials') if location is False:", "-*- __version__ = '1.0.2' import os import appdirs import osmnx", "import mapping cache_dir = None memory = None def set_cache_dir(location=None,", "cache_dir : Path-like or False, optional A path for the", "directory for use with the tutorials. Parameter --------- cache_dir :", "if location is None: location = appdirs.user_cache_dir('transportation_tutorials') if location is", "for module, func_name in make_cache: try: func = getattr(module, f\"_{func_name}_orig\")", "to disable caching. \"\"\" global memory, cache_dir if location is", "memory = joblib.Memory(location, compress=compress, verbose=verbose, **kwargs) make_cache = ( (ox,", "utf-8 -*- __version__ = '1.0.2' import os import appdirs import", "__version__ = '1.0.2' import os import appdirs import osmnx as", "= ( (ox, 'gdf_from_place'), (ox, 'graph_from_bbox'), (requests, 'get'), (requests, 'post'),", "to False to disable caching. \"\"\" global memory, cache_dir if", "global memory, cache_dir if location is None: location = appdirs.user_cache_dir('transportation_tutorials')", "Set up a cache directory for use with the tutorials.", "import os import appdirs import osmnx as ox import joblib", "False, optional A path for the cache files. Set to", ".files import load_vars, save_vars, cached, inflate_tar, download_zipfile from .data import", "= joblib.Memory(location, compress=compress, verbose=verbose, **kwargs) make_cache = ( (ox, 'gdf_from_place'),", "except AttributeError: func = getattr(module, func_name) setattr(module, f\"_{func_name}_orig\", func) setattr(module,", "from .tools.view_code import show_file from . import mapping cache_dir =", "(ox, 'graph_from_bbox'), (requests, 'get'), (requests, 'post'), ) for module, func_name", "set_cache_dir(location=None, compress=True, verbose=0, **kwargs): \"\"\" Set up a cache directory", "--------- cache_dir : Path-like or False, optional A path for", "( (ox, 'gdf_from_place'), (ox, 'graph_from_bbox'), (requests, 'get'), (requests, 'post'), )", "a cache directory for use with the tutorials. 
Parameter ---------", "verbose=0, **kwargs): \"\"\" Set up a cache directory for use", "try: func = getattr(module, f\"_{func_name}_orig\") except AttributeError: func = getattr(module,", "list_data, problematic from .tools.view_code import show_file from . import mapping", "**kwargs) make_cache = ( (ox, 'gdf_from_place'), (ox, 'graph_from_bbox'), (requests, 'get'),", "appdirs.user_cache_dir('transportation_tutorials') if location is False: location = None memory =", ") for module, func_name in make_cache: try: func = getattr(module,", "\"\"\" Set up a cache directory for use with the", "**kwargs): \"\"\" Set up a cache directory for use with", "= getattr(module, f\"_{func_name}_orig\") except AttributeError: func = getattr(module, func_name) setattr(module,", ". import mapping cache_dir = None memory = None def", ": Path-like or False, optional A path for the cache", "= None memory = None def set_cache_dir(location=None, compress=True, verbose=0, **kwargs):", "download_zipfile from .data import data, list_data, problematic from .tools.view_code import", "optional A path for the cache files. Set to False", "= None def set_cache_dir(location=None, compress=True, verbose=0, **kwargs): \"\"\" Set up", "data, list_data, problematic from .tools.view_code import show_file from . import", "is None: location = appdirs.user_cache_dir('transportation_tutorials') if location is False: location", "(requests, 'post'), ) for module, func_name in make_cache: try: func", "'1.0.2' import os import appdirs import osmnx as ox import" ]
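# --- Illustrative sketch (not part of the module above) ---
# set_cache_dir() monkey-patches functions with joblib-cached wrappers while
# stashing each original under "_<name>_orig". The same stash-then-wrap idiom
# on a stand-in object shows why repeated calls do not double-wrap; all names
# below are hypothetical.

import types

fake_module = types.SimpleNamespace(get=lambda url: 'payload for ' + url)


def wrap_with_stash(module, func_name, wrapper):
    # Reuse the stashed original if this function was already wrapped once.
    try:
        func = getattr(module, f"_{func_name}_orig")
    except AttributeError:
        func = getattr(module, func_name)
        setattr(module, f"_{func_name}_orig", func)
    setattr(module, func_name, wrapper(func))


wrap_with_stash(fake_module, 'get', lambda f: lambda url: f(url).upper())
wrap_with_stash(fake_module, 'get', lambda f: lambda url: f(url).upper())
assert fake_module.get('x') == 'PAYLOAD FOR X'  # wrapped once, not twice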
[ "except Exception as e: return False class Timestamp: # a", "requests.models import PreparedRequest def is_valid_url(url): prepared_request = PreparedRequest() try: prepared_request.prepare_url(url,", "return True elif self.minutes > other.minutes: return False if self.seconds", "other.minutes and self.seconds == other.seconds and self.milliseconds == other.milliseconds def", "PreparedRequest() try: prepared_request.prepare_url(url, None) return True except Exception as e:", "self.hours, self.minutes, self.seconds, self.milliseconds = 0, 0, 0, 0 for", "from requests.models import PreparedRequest def is_valid_url(url): prepared_request = PreparedRequest() try:", "3600000 temp %= 3600000 t.minutes = temp // 60000 temp", "%= 60000 t.seconds = temp // 1000 t.milliseconds = temp", "and self.minutes == other.minutes and self.seconds == other.seconds and self.milliseconds", "prepared_request = PreparedRequest() try: prepared_request.prepare_url(url, None) return True except Exception", "True elif self.minutes > other.minutes: return False if self.seconds <", "other.minutes: return True elif self.minutes > other.minutes: return False if", "__eq__(self, other): return self.hours == other.hours and self.minutes == other.minutes", "self.minutes == 0): result.append(\"{}m\".format(self.minutes)) result.append(\"{}s\".format(self.seconds)) if self.milliseconds > 0: result.append(\"{}ms\".format(self.milliseconds))", "self.seconds == other.seconds and self.milliseconds == other.milliseconds def __lt__(self, other):", "self.milliseconds += int(arg[:-2]) elif arg.endswith(\"s\"): self.seconds += int(arg[:-1]) elif arg.endswith(\"m\"):", "def __lt__(self, other): if self.hours < other.hours: return True elif", "<reponame>czajowaty/curry-bot<filename>common/common.py from requests.models import PreparedRequest def is_valid_url(url): prepared_request = PreparedRequest()", "s.split(): if arg.endswith(\"ms\"): self.milliseconds += int(arg[:-2]) elif arg.endswith(\"s\"): self.seconds +=", "ms t.hours = temp // 3600000 temp %= 3600000 t.minutes", "60000 temp %= 60000 t.seconds = temp // 1000 t.milliseconds", "result.append(\"{}h\".format(self.hours)) if not (self.hours == 0 and self.minutes == 0):", "' '.join(result) def __eq__(self, other): return self.hours == other.hours and", "'.join(result) def __eq__(self, other): return self.hours == other.hours and self.minutes", "False if self.minutes < other.minutes: return True elif self.minutes >", "Timestamp: # a speedrun.com style timestamp e.g. \"3h 53m 233s", "arg.endswith(\"ms\"): self.milliseconds += int(arg[:-2]) elif arg.endswith(\"s\"): self.seconds += int(arg[:-1]) elif", "self.milliseconds = 0, 0, 0, 0 for arg in s.split():", "!= 0: result.append(\"{}h\".format(self.hours)) if not (self.hours == 0 and self.minutes", "+= int(arg[:-1]) @staticmethod def from_milliseconds(ms): t = Timestamp(\"0ms\") temp =", "and self.seconds == other.seconds and self.milliseconds == other.milliseconds def __lt__(self,", "== other.seconds and self.milliseconds == other.milliseconds def __lt__(self, other): if", "def __str__(self): result = [] if self.hours != 0: result.append(\"{}h\".format(self.hours))", "speedrun.com style timestamp e.g. 
\"3h 53m 233s 380ms\" def __init__(self,", "other.seconds and self.milliseconds == other.milliseconds def __lt__(self, other): if self.hours", "self.milliseconds > 0: result.append(\"{}ms\".format(self.milliseconds)) return ' '.join(result) def __eq__(self, other):", "elif self.minutes > other.minutes: return False if self.seconds < other.seconds:", "== 0): result.append(\"{}m\".format(self.minutes)) result.append(\"{}s\".format(self.seconds)) if self.milliseconds > 0: result.append(\"{}ms\".format(self.milliseconds)) return", "== 0 and self.minutes == 0): result.append(\"{}m\".format(self.minutes)) result.append(\"{}s\".format(self.seconds)) if self.milliseconds", "other.minutes: return False if self.seconds < other.seconds: return True elif", "True except Exception as e: return False class Timestamp: #", "self.hours != 0: result.append(\"{}h\".format(self.hours)) if not (self.hours == 0 and", "0, 0, 0, 0 for arg in s.split(): if arg.endswith(\"ms\"):", "None) return True except Exception as e: return False class", "self.minutes > other.minutes: return False if self.seconds < other.seconds: return", "return False class Timestamp: # a speedrun.com style timestamp e.g.", "= temp // 1000 t.milliseconds = temp % 1000 return", "import PreparedRequest def is_valid_url(url): prepared_request = PreparedRequest() try: prepared_request.prepare_url(url, None)", "= temp // 3600000 temp %= 3600000 t.minutes = temp", "+= int(arg[:-1]) elif arg.endswith(\"h\"): self.hours += int(arg[:-1]) @staticmethod def from_milliseconds(ms):", "[] if self.hours != 0: result.append(\"{}h\".format(self.hours)) if not (self.hours ==", "1000 t.milliseconds = temp % 1000 return t def __str__(self):", "arg.endswith(\"m\"): self.minutes += int(arg[:-1]) elif arg.endswith(\"h\"): self.hours += int(arg[:-1]) @staticmethod", "== other.minutes and self.seconds == other.seconds and self.milliseconds == other.milliseconds", "Exception as e: return False class Timestamp: # a speedrun.com", "// 1000 t.milliseconds = temp % 1000 return t def", "for arg in s.split(): if arg.endswith(\"ms\"): self.milliseconds += int(arg[:-2]) elif", "e.g. \"3h 53m 233s 380ms\" def __init__(self, s): self.hours, self.minutes,", "def from_milliseconds(ms): t = Timestamp(\"0ms\") temp = ms t.hours =", "__str__(self): result = [] if self.hours != 0: result.append(\"{}h\".format(self.hours)) if", "0): result.append(\"{}m\".format(self.minutes)) result.append(\"{}s\".format(self.seconds)) if self.milliseconds > 0: result.append(\"{}ms\".format(self.milliseconds)) return '", "def __eq__(self, other): return self.hours == other.hours and self.minutes ==", "+= int(arg[:-1]) elif arg.endswith(\"m\"): self.minutes += int(arg[:-1]) elif arg.endswith(\"h\"): self.hours", "other.milliseconds def __lt__(self, other): if self.hours < other.hours: return True", "t.seconds = temp // 1000 t.milliseconds = temp % 1000", "self.minutes += int(arg[:-1]) elif arg.endswith(\"h\"): self.hours += int(arg[:-1]) @staticmethod def", "return True elif self.seconds > other.seconds: return False return self.milliseconds", "class Timestamp: # a speedrun.com style timestamp e.g. 
\"3h 53m", "arg.endswith(\"s\"): self.seconds += int(arg[:-1]) elif arg.endswith(\"m\"): self.minutes += int(arg[:-1]) elif", "t def __str__(self): result = [] if self.hours != 0:", "e: return False class Timestamp: # a speedrun.com style timestamp", "def is_valid_url(url): prepared_request = PreparedRequest() try: prepared_request.prepare_url(url, None) return True", "elif arg.endswith(\"m\"): self.minutes += int(arg[:-1]) elif arg.endswith(\"h\"): self.hours += int(arg[:-1])", "result.append(\"{}m\".format(self.minutes)) result.append(\"{}s\".format(self.seconds)) if self.milliseconds > 0: result.append(\"{}ms\".format(self.milliseconds)) return ' '.join(result)", "if self.milliseconds > 0: result.append(\"{}ms\".format(self.milliseconds)) return ' '.join(result) def __eq__(self,", "__init__(self, s): self.hours, self.minutes, self.seconds, self.milliseconds = 0, 0, 0,", "53m 233s 380ms\" def __init__(self, s): self.hours, self.minutes, self.seconds, self.milliseconds", "return False if self.seconds < other.seconds: return True elif self.seconds", "self.minutes == other.minutes and self.seconds == other.seconds and self.milliseconds ==", "Timestamp(\"0ms\") temp = ms t.hours = temp // 3600000 temp", "> other.minutes: return False if self.seconds < other.seconds: return True", "other.seconds: return True elif self.seconds > other.seconds: return False return", "self.hours > other.hours: return False if self.minutes < other.minutes: return", "= Timestamp(\"0ms\") temp = ms t.hours = temp // 3600000", "380ms\" def __init__(self, s): self.hours, self.minutes, self.seconds, self.milliseconds = 0,", "0, 0 for arg in s.split(): if arg.endswith(\"ms\"): self.milliseconds +=", "elif self.seconds > other.seconds: return False return self.milliseconds < other.milliseconds", "self.hours < other.hours: return True elif self.hours > other.hours: return", "= ms t.hours = temp // 3600000 temp %= 3600000", "and self.minutes == 0): result.append(\"{}m\".format(self.minutes)) result.append(\"{}s\".format(self.seconds)) if self.milliseconds > 0:", "< other.hours: return True elif self.hours > other.hours: return False", "as e: return False class Timestamp: # a speedrun.com style", "if self.hours < other.hours: return True elif self.hours > other.hours:", "temp %= 60000 t.seconds = temp // 1000 t.milliseconds =", "int(arg[:-1]) elif arg.endswith(\"h\"): self.hours += int(arg[:-1]) @staticmethod def from_milliseconds(ms): t", "233s 380ms\" def __init__(self, s): self.hours, self.minutes, self.seconds, self.milliseconds =", "temp = ms t.hours = temp // 3600000 temp %=", "False class Timestamp: # a speedrun.com style timestamp e.g. \"3h", "return self.hours == other.hours and self.minutes == other.minutes and self.seconds", "self.seconds += int(arg[:-1]) elif arg.endswith(\"m\"): self.minutes += int(arg[:-1]) elif arg.endswith(\"h\"):", "temp // 60000 temp %= 60000 t.seconds = temp //", "int(arg[:-1]) @staticmethod def from_milliseconds(ms): t = Timestamp(\"0ms\") temp = ms", "elif arg.endswith(\"s\"): self.seconds += int(arg[:-1]) elif arg.endswith(\"m\"): self.minutes += int(arg[:-1])", "# a speedrun.com style timestamp e.g. 
\"3h 53m 233s 380ms\"", "t = Timestamp(\"0ms\") temp = ms t.hours = temp //", "int(arg[:-1]) elif arg.endswith(\"m\"): self.minutes += int(arg[:-1]) elif arg.endswith(\"h\"): self.hours +=", "from_milliseconds(ms): t = Timestamp(\"0ms\") temp = ms t.hours = temp", "result = [] if self.hours != 0: result.append(\"{}h\".format(self.hours)) if not", "t.minutes = temp // 60000 temp %= 60000 t.seconds =", "result.append(\"{}ms\".format(self.milliseconds)) return ' '.join(result) def __eq__(self, other): return self.hours ==", "int(arg[:-2]) elif arg.endswith(\"s\"): self.seconds += int(arg[:-1]) elif arg.endswith(\"m\"): self.minutes +=", "True elif self.seconds > other.seconds: return False return self.milliseconds <", "return True elif self.hours > other.hours: return False if self.minutes", "// 60000 temp %= 60000 t.seconds = temp // 1000", "result.append(\"{}s\".format(self.seconds)) if self.milliseconds > 0: result.append(\"{}ms\".format(self.milliseconds)) return ' '.join(result) def", "other.hours: return False if self.minutes < other.minutes: return True elif", "> other.hours: return False if self.minutes < other.minutes: return True", "% 1000 return t def __str__(self): result = [] if", "< other.seconds: return True elif self.seconds > other.seconds: return False", "arg.endswith(\"h\"): self.hours += int(arg[:-1]) @staticmethod def from_milliseconds(ms): t = Timestamp(\"0ms\")", "s): self.hours, self.minutes, self.seconds, self.milliseconds = 0, 0, 0, 0", "> 0: result.append(\"{}ms\".format(self.milliseconds)) return ' '.join(result) def __eq__(self, other): return", "temp % 1000 return t def __str__(self): result = []", "temp %= 3600000 t.minutes = temp // 60000 temp %=", "3600000 t.minutes = temp // 60000 temp %= 60000 t.seconds", "(self.hours == 0 and self.minutes == 0): result.append(\"{}m\".format(self.minutes)) result.append(\"{}s\".format(self.seconds)) if", "== other.milliseconds def __lt__(self, other): if self.hours < other.hours: return", "= 0, 0, 0, 0 for arg in s.split(): if", "a speedrun.com style timestamp e.g. 
\"3h 53m 233s 380ms\" def", "t.milliseconds = temp % 1000 return t def __str__(self): result", "self.seconds < other.seconds: return True elif self.seconds > other.seconds: return", "is_valid_url(url): prepared_request = PreparedRequest() try: prepared_request.prepare_url(url, None) return True except", "temp // 1000 t.milliseconds = temp % 1000 return t", "@staticmethod def from_milliseconds(ms): t = Timestamp(\"0ms\") temp = ms t.hours", "0 for arg in s.split(): if arg.endswith(\"ms\"): self.milliseconds += int(arg[:-2])", "\"3h 53m 233s 380ms\" def __init__(self, s): self.hours, self.minutes, self.seconds,", "self.milliseconds == other.milliseconds def __lt__(self, other): if self.hours < other.hours:", "if self.minutes < other.minutes: return True elif self.minutes > other.minutes:", "self.seconds, self.milliseconds = 0, 0, 0, 0 for arg in", "self.hours += int(arg[:-1]) @staticmethod def from_milliseconds(ms): t = Timestamp(\"0ms\") temp", "= [] if self.hours != 0: result.append(\"{}h\".format(self.hours)) if not (self.hours", "if not (self.hours == 0 and self.minutes == 0): result.append(\"{}m\".format(self.minutes))", "other): return self.hours == other.hours and self.minutes == other.minutes and", "def __init__(self, s): self.hours, self.minutes, self.seconds, self.milliseconds = 0, 0,", "elif self.hours > other.hours: return False if self.minutes < other.minutes:", "= PreparedRequest() try: prepared_request.prepare_url(url, None) return True except Exception as", "return True except Exception as e: return False class Timestamp:", "= temp % 1000 return t def __str__(self): result =", "if arg.endswith(\"ms\"): self.milliseconds += int(arg[:-2]) elif arg.endswith(\"s\"): self.seconds += int(arg[:-1])", "other.hours: return True elif self.hours > other.hours: return False if", "PreparedRequest def is_valid_url(url): prepared_request = PreparedRequest() try: prepared_request.prepare_url(url, None) return", "return ' '.join(result) def __eq__(self, other): return self.hours == other.hours", "< other.minutes: return True elif self.minutes > other.minutes: return False", "%= 3600000 t.minutes = temp // 60000 temp %= 60000", "and self.milliseconds == other.milliseconds def __lt__(self, other): if self.hours <", "in s.split(): if arg.endswith(\"ms\"): self.milliseconds += int(arg[:-2]) elif arg.endswith(\"s\"): self.seconds", "+= int(arg[:-2]) elif arg.endswith(\"s\"): self.seconds += int(arg[:-1]) elif arg.endswith(\"m\"): self.minutes", "timestamp e.g. 
\"3h 53m 233s 380ms\" def __init__(self, s): self.hours,", "return False if self.minutes < other.minutes: return True elif self.minutes", "other.hours and self.minutes == other.minutes and self.seconds == other.seconds and", "self.minutes, self.seconds, self.milliseconds = 0, 0, 0, 0 for arg", "self.hours == other.hours and self.minutes == other.minutes and self.seconds ==", "1000 return t def __str__(self): result = [] if self.hours", "True elif self.hours > other.hours: return False if self.minutes <", "if self.seconds < other.seconds: return True elif self.seconds > other.seconds:", "elif arg.endswith(\"h\"): self.hours += int(arg[:-1]) @staticmethod def from_milliseconds(ms): t =", "try: prepared_request.prepare_url(url, None) return True except Exception as e: return", "0: result.append(\"{}h\".format(self.hours)) if not (self.hours == 0 and self.minutes ==", "temp // 3600000 temp %= 3600000 t.minutes = temp //", "not (self.hours == 0 and self.minutes == 0): result.append(\"{}m\".format(self.minutes)) result.append(\"{}s\".format(self.seconds))", "t.hours = temp // 3600000 temp %= 3600000 t.minutes =", "60000 t.seconds = temp // 1000 t.milliseconds = temp %", "== other.hours and self.minutes == other.minutes and self.seconds == other.seconds", "prepared_request.prepare_url(url, None) return True except Exception as e: return False", "= temp // 60000 temp %= 60000 t.seconds = temp", "0 and self.minutes == 0): result.append(\"{}m\".format(self.minutes)) result.append(\"{}s\".format(self.seconds)) if self.milliseconds >", "other): if self.hours < other.hours: return True elif self.hours >", "False if self.seconds < other.seconds: return True elif self.seconds >", "self.minutes < other.minutes: return True elif self.minutes > other.minutes: return", "0: result.append(\"{}ms\".format(self.milliseconds)) return ' '.join(result) def __eq__(self, other): return self.hours", "__lt__(self, other): if self.hours < other.hours: return True elif self.hours", "style timestamp e.g. \"3h 53m 233s 380ms\" def __init__(self, s):", "0, 0, 0 for arg in s.split(): if arg.endswith(\"ms\"): self.milliseconds", "return t def __str__(self): result = [] if self.hours !=", "if self.hours != 0: result.append(\"{}h\".format(self.hours)) if not (self.hours == 0", "arg in s.split(): if arg.endswith(\"ms\"): self.milliseconds += int(arg[:-2]) elif arg.endswith(\"s\"):", "// 3600000 temp %= 3600000 t.minutes = temp // 60000" ]
[ "from hendrix import ux from mock import patch class TestMain(HendrixTestCase):", "with patch('time.sleep'): with patch('subprocess.Popen') as popen: popen.return_value = Process() ux.main()", "django_settings) def test_settings_wsgi_absense(self): with patch('hendrix.ux.findSettingsModule') as findSettingsMod: findSettingsMod.return_value = \"\"", "options['daemonize'] = True options['traceback'] = True stdout = sys.stdout stderr", "self.assertTrue('--settings' in popen.call_args[0][0]) sys.argv = [] def test_options_structure(self): \"\"\" A", "with patch('hendrix.ux.findSettingsModule') as findSettingsMod: findSettingsMod.return_value = \"\" self.assertRaises(SettingsError, ux.djangoVsWsgi, self.DEFAULTS)", "= user_settings self.assertEqual(options['settings'], user_settings) options = ux.djangoVsWsgi(options) self.assertEqual(options['settings'], user_settings) def", "of options available \"\"\" deploy = self.wsgiDeploy() expected_keys = self.DEFAULTS.keys()", "findSettingsMod: findSettingsMod.return_value = django_settings options = self.DEFAULTS user_settings = 'myproject.settings'", "= ux.djangoVsWsgi(options) self.assertEqual(options['wsgi'], wsgi_dot_path) def test_wsgi_wrong_path_raises(self): wsgi_dot_path = '_this.leads.nowhere.man' options", "test_wsgi_wrong_path_raises(self): wsgi_dot_path = '_this.leads.nowhere.man' options = self.DEFAULTS options.update({'wsgi': wsgi_dot_path}) self.assertRaises(ImportError,", "def test_user_settings_overrides_system_variable(self): django_settings = 'django.inanity' with patch('hendrix.ux.findSettingsModule') as findSettingsMod: findSettingsMod.return_value", "hx_options() os.environ['DJANGO_SETTINGS_MODULE'] = '' self.devnull = open(os.devnull, 'w') self.args_list =", "= sys.stderr redirect = ux.noiseControl(options) self.assertEqual(sys.stdout.name, stdout.name) self.assertEqual(sys.stderr.name, stderr.name) self.assertEqual(redirect,", "SettingsError from hendrix.options import options as hx_options from hendrix import", "self.assertEqual(options['wsgi'], wsgi_dot_path) def test_wsgi_wrong_path_raises(self): wsgi_dot_path = '_this.leads.nowhere.man' options = self.DEFAULTS", "stdout.name) self.assertEqual(sys.stderr.name, stderr.name) self.assertEqual(redirect, None) def test_noise_control_traceback(self): options = self.DEFAULTS", "self.DEFAULTS test_path = '/if/u/have/this/path/you/suck' options['pythonpath'] = test_path self.assertRaises(IOError, ux.exposeProject, options)", "popen.call_args[0][0]) sys.argv = [] def test_options_structure(self): \"\"\" A test to", "wsgi_dot_path}) options = ux.djangoVsWsgi(options) self.assertEqual(options['wsgi'], wsgi_dot_path) def test_wsgi_wrong_path_raises(self): wsgi_dot_path =", "= sys.path sys.path = [p for p in _path if", "for p in _path if p != cwd] self.assertTrue(cwd not", "sys.path) sys.path = [p for p in sys.path if p", "= True self.assertFalse(options['reload']) self.assertFalse(options['loud']) options = ux.devFriendly(options) self.assertTrue(options['reload']) self.assertTrue(options['loud']) def", "'_this.leads.nowhere.man' options = self.DEFAULTS options.update({'wsgi': wsgi_dot_path}) self.assertRaises(ImportError, ux.djangoVsWsgi, options) def", "self.assertRaises(SettingsError, ux.djangoVsWsgi, self.DEFAULTS) def test_user_settings_overrides_system_variable(self): django_settings = 'django.inanity' with patch('hendrix.ux.findSettingsModule')", "def test_cwd_exposure(self): cwd = os.getcwd() _path = sys.path sys.path =", "setUp(self): super(TestMain, self).setUp() 
self.DEFAULTS = hx_options() os.environ['DJANGO_SETTINGS_MODULE'] = '' self.devnull", "findSettingsMod.return_value = django_settings options = self.DEFAULTS user_settings = 'myproject.settings' options['settings']", "options.update({'wsgi': wsgi_dot_path}) self.assertRaises(ImportError, ux.djangoVsWsgi, options) def test_cwd_exposure(self): cwd = os.getcwd()", "test_dev_friendly_options(self): options = self.DEFAULTS options['dev'] = True self.assertFalse(options['reload']) self.assertFalse(options['loud']) options", "options as hx_options from hendrix import ux from mock import", "stderr.name) self.assertEqual(redirect, None) def test_main_with_daemonize(self): sys.argv = self.args_list + ['-d',", "findSettingsMod.return_value = \"\" self.assertRaises(SettingsError, ux.djangoVsWsgi, self.DEFAULTS) def test_user_settings_overrides_system_variable(self): django_settings =", "os.path.dirname(os.getcwd()), 'hendrix/test/testproject' ) options['pythonpath'] = test_path ux.exposeProject(options) self.assertTrue(test_path in sys.path)", "= '/if/u/have/this/path/you/suck' options['pythonpath'] = test_path self.assertRaises(IOError, ux.exposeProject, options) def test_dev_friendly_options(self):", "test_pythonpath(self): options = self.DEFAULTS test_path = os.path.join( os.path.dirname(os.getcwd()), 'hendrix/test/testproject' )", "= self.DEFAULTS options.update({'wsgi': wsgi_dot_path}) self.assertRaises(ImportError, ux.djangoVsWsgi, options) def test_cwd_exposure(self): cwd", "= ux.djangoVsWsgi(options) self.assertEqual(options['settings'], user_settings) def test_wsgi_correct_wsgi_path_works(self): wsgi_dot_path = 'hendrix.test.wsgi' options", "in popen.call_args[0][0]) sys.argv = [] def test_options_structure(self): \"\"\" A test", "patch('hendrix.ux.findSettingsModule') as findSettingsMod: findSettingsMod.return_value = \"\" self.assertRaises(SettingsError, ux.djangoVsWsgi, self.DEFAULTS) def", "[p for p in _path if p != cwd] self.assertTrue(cwd", "= ux.devFriendly(options) self.assertTrue(options['reload']) self.assertTrue(options['loud']) def test_noise_control_daemonize(self): options = self.DEFAULTS options['quiet']", "ux.exposeProject, options) def test_dev_friendly_options(self): options = self.DEFAULTS options['dev'] = True", "user_settings) def test_wsgi_correct_wsgi_path_works(self): wsgi_dot_path = 'hendrix.test.wsgi' options = self.DEFAULTS options.update({'wsgi':", "os.environ['DJANGO_SETTINGS_MODULE'] = '' self.devnull = open(os.devnull, 'w') self.args_list = ['hx',", "self.patcher.stop() def test_settings_from_system_variable(self): django_settings = 'django.inanity' with patch('hendrix.ux.findSettingsModule') as findSettingsMod:", "self).setUp() self.DEFAULTS = hx_options() os.environ['DJANGO_SETTINGS_MODULE'] = '' self.devnull = open(os.devnull,", "ux.djangoVsWsgi(options) self.assertEqual(options['wsgi'], wsgi_dot_path) def test_wsgi_wrong_path_raises(self): wsgi_dot_path = '_this.leads.nowhere.man' options =", "test_path self.assertRaises(IOError, ux.exposeProject, options) def test_dev_friendly_options(self): options = self.DEFAULTS options['dev']", "def tearDown(self): super(TestMain, self).tearDown() self.devnull.close() self.patcher.stop() def test_settings_from_system_variable(self): django_settings =", "= True stdout = sys.stdout stderr = sys.stderr redirect =", "import os import sys from . 
import HendrixTestCase, TEST_SETTINGS from", "!= test_path] def test_shitty_pythonpath(self): options = self.DEFAULTS test_path = '/if/u/have/this/path/you/suck'", "[] def test_options_structure(self): \"\"\" A test to ensure that HendrixDeploy.options", "= patch('hendrix.ux.findSettingsModule') self.patcher.start() def tearDown(self): super(TestMain, self).tearDown() self.devnull.close() self.patcher.stop() def", "in sys.path) sys.path = [p for p in sys.path if", "sys.stdout stderr = sys.stderr redirect = ux.noiseControl(options) self.assertEqual(sys.stdout.name, stdout.name) self.assertEqual(sys.stderr.name,", "ux.noiseControl(options) self.assertEqual(sys.stdout.name, stdout.name) self.assertEqual(sys.stderr.name, stderr.name) self.assertEqual(redirect, None) def test_noise_control_traceback(self): options", "options['traceback'] = True stdout = sys.stdout stderr = sys.stderr redirect", "wsgi_dot_path = 'hendrix.test.wsgi' options = self.DEFAULTS options.update({'wsgi': wsgi_dot_path}) options =", "def test_wsgi_wrong_path_raises(self): wsgi_dot_path = '_this.leads.nowhere.man' options = self.DEFAULTS options.update({'wsgi': wsgi_dot_path})", "self.assertFalse(options['reload']) self.assertFalse(options['loud']) options = ux.devFriendly(options) self.assertTrue(options['reload']) self.assertTrue(options['loud']) def test_noise_control_daemonize(self): options", "popen: popen.return_value = Process() ux.main() self.assertTrue(popen.called) self.assertTrue('--settings' in popen.call_args[0][0]) sys.argv", "self.assertTrue(cwd not in sys.path) ux.exposeProject(self.DEFAULTS) self.assertTrue(cwd in sys.path) def test_pythonpath(self):", "self).tearDown() self.devnull.close() self.patcher.stop() def test_settings_from_system_variable(self): django_settings = 'django.inanity' with patch('hendrix.ux.findSettingsModule')", "super(TestMain, self).tearDown() self.devnull.close() self.patcher.stop() def test_settings_from_system_variable(self): django_settings = 'django.inanity' with", "= self.DEFAULTS options.update({'wsgi': wsgi_dot_path}) options = ux.djangoVsWsgi(options) self.assertEqual(options['wsgi'], wsgi_dot_path) def", "test_path ux.exposeProject(options) self.assertTrue(test_path in sys.path) sys.path = [p for p", "os import sys from . 
import HendrixTestCase, TEST_SETTINGS from hendrix.contrib", "= os.getcwd() _path = sys.path sys.path = [p for p", "Process(object): def poll(self): return 0 with patch('time.sleep'): with patch('subprocess.Popen') as", "findSettingsMod.return_value = django_settings options = self.DEFAULTS self.assertEqual(options['settings'], '') options =", "= \"\" self.assertRaises(SettingsError, ux.djangoVsWsgi, self.DEFAULTS) def test_user_settings_overrides_system_variable(self): django_settings = 'django.inanity'", "'myproject.settings' options['settings'] = user_settings self.assertEqual(options['settings'], user_settings) options = ux.djangoVsWsgi(options) self.assertEqual(options['settings'],", "options.update({'wsgi': wsgi_dot_path}) options = ux.djangoVsWsgi(options) self.assertEqual(options['wsgi'], wsgi_dot_path) def test_wsgi_wrong_path_raises(self): wsgi_dot_path", "options) def test_dev_friendly_options(self): options = self.DEFAULTS options['dev'] = True self.assertFalse(options['reload'])", "= self.wsgiDeploy() expected_keys = self.DEFAULTS.keys() actual_keys = deploy.options.keys() self.assertListEqual(expected_keys, actual_keys)", "from mock import patch class TestMain(HendrixTestCase): def setUp(self): super(TestMain, self).setUp()", "as findSettingsMod: findSettingsMod.return_value = \"\" self.assertRaises(SettingsError, ux.djangoVsWsgi, self.DEFAULTS) def test_user_settings_overrides_system_variable(self):", "= os.path.join( os.path.dirname(os.getcwd()), 'hendrix/test/testproject' ) options['pythonpath'] = test_path ux.exposeProject(options) self.assertTrue(test_path", "self.assertEqual(options['settings'], user_settings) def test_wsgi_correct_wsgi_path_works(self): wsgi_dot_path = 'hendrix.test.wsgi' options = self.DEFAULTS", "options = self.DEFAULTS options['quiet'] = True options['daemonize'] = True options['traceback']", ") options['pythonpath'] = test_path ux.exposeProject(options) self.assertTrue(test_path in sys.path) sys.path =", "= self.DEFAULTS options['quiet'] = True options['daemonize'] = True stdout =", "sys.stderr redirect = ux.noiseControl(options) self.assertEqual(sys.stdout.name, stdout.name) self.assertEqual(sys.stderr.name, stderr.name) self.assertEqual(redirect, None)", "'hendrix/test/testproject' ) options['pythonpath'] = test_path ux.exposeProject(options) self.assertTrue(test_path in sys.path) sys.path", "True options['traceback'] = True stdout = sys.stdout stderr = sys.stderr", "class Process(object): def poll(self): return 0 with patch('time.sleep'): with patch('subprocess.Popen')", "= True options['daemonize'] = True stdout = sys.stdout stderr =", "complete set of options available \"\"\" deploy = self.wsgiDeploy() expected_keys", "self.args_list + ['-d', '--settings', TEST_SETTINGS] class Process(object): def poll(self): return", "def test_dev_friendly_options(self): options = self.DEFAULTS options['dev'] = True self.assertFalse(options['reload']) self.assertFalse(options['loud'])", "sys.path = [p for p in _path if p !=", "HendrixTestCase, TEST_SETTINGS from hendrix.contrib import SettingsError from hendrix.options import options", "hendrix import ux from mock import patch class TestMain(HendrixTestCase): def", "= self.DEFAULTS test_path = '/if/u/have/this/path/you/suck' options['pythonpath'] = test_path self.assertRaises(IOError, ux.exposeProject,", "Process() ux.main() self.assertTrue(popen.called) self.assertTrue('--settings' in popen.call_args[0][0]) sys.argv = [] def", "test_shitty_pythonpath(self): options = self.DEFAULTS test_path = 
'/if/u/have/this/path/you/suck' options['pythonpath'] = test_path", "not in sys.path) ux.exposeProject(self.DEFAULTS) self.assertTrue(cwd in sys.path) def test_pythonpath(self): options", "in sys.path) def test_pythonpath(self): options = self.DEFAULTS test_path = os.path.join(", "self.assertEqual(options['settings'], '') options = ux.djangoVsWsgi(options) self.assertEqual(options['settings'], django_settings) def test_settings_wsgi_absense(self): with", "self.assertRaises(ImportError, ux.djangoVsWsgi, options) def test_cwd_exposure(self): cwd = os.getcwd() _path =", "self.devnull = open(os.devnull, 'w') self.args_list = ['hx', 'start'] self.patcher =", "with patch('hendrix.ux.findSettingsModule') as findSettingsMod: findSettingsMod.return_value = django_settings options = self.DEFAULTS", "cwd = os.getcwd() _path = sys.path sys.path = [p for", "django_settings = 'django.inanity' with patch('hendrix.ux.findSettingsModule') as findSettingsMod: findSettingsMod.return_value = django_settings", "True options['daemonize'] = True options['traceback'] = True stdout = sys.stdout", "for p in sys.path if p != test_path] def test_shitty_pythonpath(self):", "def setUp(self): super(TestMain, self).setUp() self.DEFAULTS = hx_options() os.environ['DJANGO_SETTINGS_MODULE'] = ''", "options = self.DEFAULTS user_settings = 'myproject.settings' options['settings'] = user_settings self.assertEqual(options['settings'],", "self.DEFAULTS options.update({'wsgi': wsgi_dot_path}) self.assertRaises(ImportError, ux.djangoVsWsgi, options) def test_cwd_exposure(self): cwd =", "redirect = ux.noiseControl(options) self.assertEqual(sys.stdout.name, stdout.name) self.assertEqual(sys.stderr.name, stderr.name) self.assertEqual(redirect, None) def", "self.patcher = patch('hendrix.ux.findSettingsModule') self.patcher.start() def tearDown(self): super(TestMain, self).tearDown() self.devnull.close() self.patcher.stop()", "os.getcwd() _path = sys.path sys.path = [p for p in", "+ ['-d', '--settings', TEST_SETTINGS] class Process(object): def poll(self): return 0", "user_settings) options = ux.djangoVsWsgi(options) self.assertEqual(options['settings'], user_settings) def test_wsgi_correct_wsgi_path_works(self): wsgi_dot_path =", "also has the complete set of options available \"\"\" deploy", "options = ux.djangoVsWsgi(options) self.assertEqual(options['settings'], user_settings) def test_wsgi_correct_wsgi_path_works(self): wsgi_dot_path = 'hendrix.test.wsgi'", "self.patcher.start() def tearDown(self): super(TestMain, self).tearDown() self.devnull.close() self.patcher.stop() def test_settings_from_system_variable(self): django_settings", "as findSettingsMod: findSettingsMod.return_value = django_settings options = self.DEFAULTS self.assertEqual(options['settings'], '')", "deploy = self.wsgiDeploy() expected_keys = self.DEFAULTS.keys() actual_keys = deploy.options.keys() self.assertListEqual(expected_keys,", "True stdout = sys.stdout stderr = sys.stderr redirect = ux.noiseControl(options)", "ux.noiseControl(options) self.assertEqual(sys.stdout.name, stdout.name) self.assertEqual(sys.stderr.name, stderr.name) self.assertEqual(redirect, None) def test_main_with_daemonize(self): sys.argv", "self.assertFalse(options['loud']) options = ux.devFriendly(options) self.assertTrue(options['reload']) self.assertTrue(options['loud']) def test_noise_control_daemonize(self): options =", "self.devnull.close() self.patcher.stop() def test_settings_from_system_variable(self): django_settings = 'django.inanity' with patch('hendrix.ux.findSettingsModule') 
as", "from . import HendrixTestCase, TEST_SETTINGS from hendrix.contrib import SettingsError from", "\"\"\" A test to ensure that HendrixDeploy.options also has the", "def test_noise_control_daemonize(self): options = self.DEFAULTS options['quiet'] = True options['daemonize'] =", "test_noise_control_traceback(self): options = self.DEFAULTS options['quiet'] = True options['daemonize'] = True", "options = ux.devFriendly(options) self.assertTrue(options['reload']) self.assertTrue(options['loud']) def test_noise_control_daemonize(self): options = self.DEFAULTS", "TEST_SETTINGS from hendrix.contrib import SettingsError from hendrix.options import options as", "self.DEFAULTS options.update({'wsgi': wsgi_dot_path}) options = ux.djangoVsWsgi(options) self.assertEqual(options['wsgi'], wsgi_dot_path) def test_wsgi_wrong_path_raises(self):", "as hx_options from hendrix import ux from mock import patch", "popen.return_value = Process() ux.main() self.assertTrue(popen.called) self.assertTrue('--settings' in popen.call_args[0][0]) sys.argv =", "= '' self.devnull = open(os.devnull, 'w') self.args_list = ['hx', 'start']", "sys.path) def test_pythonpath(self): options = self.DEFAULTS test_path = os.path.join( os.path.dirname(os.getcwd()),", "test_path] def test_shitty_pythonpath(self): options = self.DEFAULTS test_path = '/if/u/have/this/path/you/suck' options['pythonpath']", "self.assertEqual(options['settings'], user_settings) options = ux.djangoVsWsgi(options) self.assertEqual(options['settings'], user_settings) def test_wsgi_correct_wsgi_path_works(self): wsgi_dot_path", "= self.DEFAULTS user_settings = 'myproject.settings' options['settings'] = user_settings self.assertEqual(options['settings'], user_settings)", "= 'hendrix.test.wsgi' options = self.DEFAULTS options.update({'wsgi': wsgi_dot_path}) options = ux.djangoVsWsgi(options)", "sys.path sys.path = [p for p in _path if p", "sys.argv = self.args_list + ['-d', '--settings', TEST_SETTINGS] class Process(object): def", "in _path if p != cwd] self.assertTrue(cwd not in sys.path)", "ux.exposeProject(self.DEFAULTS) self.assertTrue(cwd in sys.path) def test_pythonpath(self): options = self.DEFAULTS test_path", "self.assertEqual(sys.stdout.name, stdout.name) self.assertEqual(sys.stderr.name, stderr.name) self.assertEqual(redirect, None) def test_main_with_daemonize(self): sys.argv =", "poll(self): return 0 with patch('time.sleep'): with patch('subprocess.Popen') as popen: popen.return_value", "self.DEFAULTS self.assertEqual(options['settings'], '') options = ux.djangoVsWsgi(options) self.assertEqual(options['settings'], django_settings) def test_settings_wsgi_absense(self):", "os.path.join( os.path.dirname(os.getcwd()), 'hendrix/test/testproject' ) options['pythonpath'] = test_path ux.exposeProject(options) self.assertTrue(test_path in", "self.DEFAULTS options['quiet'] = True options['daemonize'] = True options['traceback'] = True", "django_settings options = self.DEFAULTS user_settings = 'myproject.settings' options['settings'] = user_settings", "wsgi_dot_path) def test_wsgi_wrong_path_raises(self): wsgi_dot_path = '_this.leads.nowhere.man' options = self.DEFAULTS options.update({'wsgi':", "findSettingsMod: findSettingsMod.return_value = \"\" self.assertRaises(SettingsError, ux.djangoVsWsgi, self.DEFAULTS) def test_user_settings_overrides_system_variable(self): django_settings", "sys.argv = [] def test_options_structure(self): \"\"\" A test to ensure", "return 0 with patch('time.sleep'): with patch('subprocess.Popen') as popen: popen.return_value =", 
"test_noise_control_daemonize(self): options = self.DEFAULTS options['quiet'] = True options['daemonize'] = True", ". import HendrixTestCase, TEST_SETTINGS from hendrix.contrib import SettingsError from hendrix.options", "hendrix.options import options as hx_options from hendrix import ux from", "\"\" self.assertRaises(SettingsError, ux.djangoVsWsgi, self.DEFAULTS) def test_user_settings_overrides_system_variable(self): django_settings = 'django.inanity' with", "def test_wsgi_correct_wsgi_path_works(self): wsgi_dot_path = 'hendrix.test.wsgi' options = self.DEFAULTS options.update({'wsgi': wsgi_dot_path})", "def test_shitty_pythonpath(self): options = self.DEFAULTS test_path = '/if/u/have/this/path/you/suck' options['pythonpath'] =", "options = self.DEFAULTS options['quiet'] = True options['daemonize'] = True stdout", "options['daemonize'] = True stdout = sys.stdout stderr = sys.stderr redirect", "options['settings'] = user_settings self.assertEqual(options['settings'], user_settings) options = ux.djangoVsWsgi(options) self.assertEqual(options['settings'], user_settings)", "= self.DEFAULTS options['quiet'] = True options['daemonize'] = True options['traceback'] =", "def test_noise_control_traceback(self): options = self.DEFAULTS options['quiet'] = True options['daemonize'] =", "True self.assertFalse(options['reload']) self.assertFalse(options['loud']) options = ux.devFriendly(options) self.assertTrue(options['reload']) self.assertTrue(options['loud']) def test_noise_control_daemonize(self):", "ux.djangoVsWsgi(options) self.assertEqual(options['settings'], django_settings) def test_settings_wsgi_absense(self): with patch('hendrix.ux.findSettingsModule') as findSettingsMod: findSettingsMod.return_value", "options = self.DEFAULTS test_path = os.path.join( os.path.dirname(os.getcwd()), 'hendrix/test/testproject' ) options['pythonpath']", "test_settings_wsgi_absense(self): with patch('hendrix.ux.findSettingsModule') as findSettingsMod: findSettingsMod.return_value = \"\" self.assertRaises(SettingsError, ux.djangoVsWsgi,", "None) def test_noise_control_traceback(self): options = self.DEFAULTS options['quiet'] = True options['daemonize']", "'start'] self.patcher = patch('hendrix.ux.findSettingsModule') self.patcher.start() def tearDown(self): super(TestMain, self).tearDown() self.devnull.close()", "ux from mock import patch class TestMain(HendrixTestCase): def setUp(self): super(TestMain,", "set of options available \"\"\" deploy = self.wsgiDeploy() expected_keys =", "p in sys.path if p != test_path] def test_shitty_pythonpath(self): options", "= '_this.leads.nowhere.man' options = self.DEFAULTS options.update({'wsgi': wsgi_dot_path}) self.assertRaises(ImportError, ux.djangoVsWsgi, options)", "if p != test_path] def test_shitty_pythonpath(self): options = self.DEFAULTS test_path", "options['dev'] = True self.assertFalse(options['reload']) self.assertFalse(options['loud']) options = ux.devFriendly(options) self.assertTrue(options['reload']) self.assertTrue(options['loud'])", "p in _path if p != cwd] self.assertTrue(cwd not in", "import ux from mock import patch class TestMain(HendrixTestCase): def setUp(self):", "self.assertEqual(sys.stderr.name, stderr.name) self.assertEqual(redirect, None) def test_noise_control_traceback(self): options = self.DEFAULTS options['quiet']", "p != cwd] self.assertTrue(cwd not in sys.path) ux.exposeProject(self.DEFAULTS) self.assertTrue(cwd in", "self.assertTrue(options['loud']) def test_noise_control_daemonize(self): options = self.DEFAULTS options['quiet'] = True 
options['daemonize']", "= ux.noiseControl(options) self.assertEqual(sys.stdout.name, stdout.name) self.assertEqual(sys.stderr.name, stderr.name) self.assertEqual(redirect, None) def test_noise_control_traceback(self):", "hx_options from hendrix import ux from mock import patch class", "import SettingsError from hendrix.options import options as hx_options from hendrix", "= self.DEFAULTS options['dev'] = True self.assertFalse(options['reload']) self.assertFalse(options['loud']) options = ux.devFriendly(options)", "'django.inanity' with patch('hendrix.ux.findSettingsModule') as findSettingsMod: findSettingsMod.return_value = django_settings options =", "self.DEFAULTS = hx_options() os.environ['DJANGO_SETTINGS_MODULE'] = '' self.devnull = open(os.devnull, 'w')", "test_options_structure(self): \"\"\" A test to ensure that HendrixDeploy.options also has", "'') options = ux.djangoVsWsgi(options) self.assertEqual(options['settings'], django_settings) def test_settings_wsgi_absense(self): with patch('hendrix.ux.findSettingsModule')", "A test to ensure that HendrixDeploy.options also has the complete", "self.assertTrue(options['reload']) self.assertTrue(options['loud']) def test_noise_control_daemonize(self): options = self.DEFAULTS options['quiet'] = True", "mock import patch class TestMain(HendrixTestCase): def setUp(self): super(TestMain, self).setUp() self.DEFAULTS", "self.assertEqual(options['settings'], django_settings) def test_settings_wsgi_absense(self): with patch('hendrix.ux.findSettingsModule') as findSettingsMod: findSettingsMod.return_value =", "in sys.path if p != test_path] def test_shitty_pythonpath(self): options =", "as findSettingsMod: findSettingsMod.return_value = django_settings options = self.DEFAULTS user_settings =", "options = self.DEFAULTS self.assertEqual(options['settings'], '') options = ux.djangoVsWsgi(options) self.assertEqual(options['settings'], django_settings)", "wsgi_dot_path = '_this.leads.nowhere.man' options = self.DEFAULTS options.update({'wsgi': wsgi_dot_path}) self.assertRaises(ImportError, ux.djangoVsWsgi,", "= sys.stdout stderr = sys.stderr redirect = ux.noiseControl(options) self.assertEqual(sys.stdout.name, stdout.name)", "= django_settings options = self.DEFAULTS self.assertEqual(options['settings'], '') options = ux.djangoVsWsgi(options)", "_path if p != cwd] self.assertTrue(cwd not in sys.path) ux.exposeProject(self.DEFAULTS)", "self.assertEqual(sys.stderr.name, stderr.name) self.assertEqual(redirect, None) def test_main_with_daemonize(self): sys.argv = self.args_list +", "class TestMain(HendrixTestCase): def setUp(self): super(TestMain, self).setUp() self.DEFAULTS = hx_options() os.environ['DJANGO_SETTINGS_MODULE']", "test_cwd_exposure(self): cwd = os.getcwd() _path = sys.path sys.path = [p", "def test_options_structure(self): \"\"\" A test to ensure that HendrixDeploy.options also", "!= cwd] self.assertTrue(cwd not in sys.path) ux.exposeProject(self.DEFAULTS) self.assertTrue(cwd in sys.path)", "['hx', 'start'] self.patcher = patch('hendrix.ux.findSettingsModule') self.patcher.start() def tearDown(self): super(TestMain, self).tearDown()", "_path = sys.path sys.path = [p for p in _path", "self.assertEqual(redirect, None) def test_noise_control_traceback(self): options = self.DEFAULTS options['quiet'] = True", "= 'myproject.settings' options['settings'] = user_settings self.assertEqual(options['settings'], user_settings) options = ux.djangoVsWsgi(options)", "= django_settings options = self.DEFAULTS user_settings = 'myproject.settings' 
options['settings'] =", "test_settings_from_system_variable(self): django_settings = 'django.inanity' with patch('hendrix.ux.findSettingsModule') as findSettingsMod: findSettingsMod.return_value =", "= True options['daemonize'] = True options['traceback'] = True stdout =", "patch('hendrix.ux.findSettingsModule') as findSettingsMod: findSettingsMod.return_value = django_settings options = self.DEFAULTS self.assertEqual(options['settings'],", "stdout.name) self.assertEqual(sys.stderr.name, stderr.name) self.assertEqual(redirect, None) def test_main_with_daemonize(self): sys.argv = self.args_list", "True options['daemonize'] = True stdout = sys.stdout stderr = sys.stderr", "ux.main() self.assertTrue(popen.called) self.assertTrue('--settings' in popen.call_args[0][0]) sys.argv = [] def test_options_structure(self):", "def test_pythonpath(self): options = self.DEFAULTS test_path = os.path.join( os.path.dirname(os.getcwd()), 'hendrix/test/testproject'", "def test_settings_from_system_variable(self): django_settings = 'django.inanity' with patch('hendrix.ux.findSettingsModule') as findSettingsMod: findSettingsMod.return_value", "self.DEFAULTS user_settings = 'myproject.settings' options['settings'] = user_settings self.assertEqual(options['settings'], user_settings) options", "from hendrix.options import options as hx_options from hendrix import ux", "ux.devFriendly(options) self.assertTrue(options['reload']) self.assertTrue(options['loud']) def test_noise_control_daemonize(self): options = self.DEFAULTS options['quiet'] =", "def test_main_with_daemonize(self): sys.argv = self.args_list + ['-d', '--settings', TEST_SETTINGS] class", "= [] def test_options_structure(self): \"\"\" A test to ensure that", "def test_settings_wsgi_absense(self): with patch('hendrix.ux.findSettingsModule') as findSettingsMod: findSettingsMod.return_value = \"\" self.assertRaises(SettingsError,", "options = ux.djangoVsWsgi(options) self.assertEqual(options['wsgi'], wsgi_dot_path) def test_wsgi_wrong_path_raises(self): wsgi_dot_path = '_this.leads.nowhere.man'", "test_main_with_daemonize(self): sys.argv = self.args_list + ['-d', '--settings', TEST_SETTINGS] class Process(object):", "0 with patch('time.sleep'): with patch('subprocess.Popen') as popen: popen.return_value = Process()", "findSettingsMod: findSettingsMod.return_value = django_settings options = self.DEFAULTS self.assertEqual(options['settings'], '') options", "that HendrixDeploy.options also has the complete set of options available", "'hendrix.test.wsgi' options = self.DEFAULTS options.update({'wsgi': wsgi_dot_path}) options = ux.djangoVsWsgi(options) self.assertEqual(options['wsgi'],", "= self.args_list + ['-d', '--settings', TEST_SETTINGS] class Process(object): def poll(self):", "available \"\"\" deploy = self.wsgiDeploy() expected_keys = self.DEFAULTS.keys() actual_keys =", "test_wsgi_correct_wsgi_path_works(self): wsgi_dot_path = 'hendrix.test.wsgi' options = self.DEFAULTS options.update({'wsgi': wsgi_dot_path}) options", "= ux.noiseControl(options) self.assertEqual(sys.stdout.name, stdout.name) self.assertEqual(sys.stderr.name, stderr.name) self.assertEqual(redirect, None) def test_main_with_daemonize(self):", "import options as hx_options from hendrix import ux from mock", "def poll(self): return 0 with patch('time.sleep'): with patch('subprocess.Popen') as popen:", "= self.DEFAULTS self.assertEqual(options['settings'], '') options = ux.djangoVsWsgi(options) self.assertEqual(options['settings'], django_settings) def", "['-d', '--settings', 
TEST_SETTINGS] class Process(object): def poll(self): return 0 with", "hendrix.contrib import SettingsError from hendrix.options import options as hx_options from", "open(os.devnull, 'w') self.args_list = ['hx', 'start'] self.patcher = patch('hendrix.ux.findSettingsModule') self.patcher.start()", "p != test_path] def test_shitty_pythonpath(self): options = self.DEFAULTS test_path =", "user_settings = 'myproject.settings' options['settings'] = user_settings self.assertEqual(options['settings'], user_settings) options =", "= Process() ux.main() self.assertTrue(popen.called) self.assertTrue('--settings' in popen.call_args[0][0]) sys.argv = []", "None) def test_main_with_daemonize(self): sys.argv = self.args_list + ['-d', '--settings', TEST_SETTINGS]", "'/if/u/have/this/path/you/suck' options['pythonpath'] = test_path self.assertRaises(IOError, ux.exposeProject, options) def test_dev_friendly_options(self): options", "patch('hendrix.ux.findSettingsModule') as findSettingsMod: findSettingsMod.return_value = django_settings options = self.DEFAULTS user_settings", "options = self.DEFAULTS options.update({'wsgi': wsgi_dot_path}) options = ux.djangoVsWsgi(options) self.assertEqual(options['wsgi'], wsgi_dot_path)", "patch('subprocess.Popen') as popen: popen.return_value = Process() ux.main() self.assertTrue(popen.called) self.assertTrue('--settings' in", "tearDown(self): super(TestMain, self).tearDown() self.devnull.close() self.patcher.stop() def test_settings_from_system_variable(self): django_settings = 'django.inanity'", "stderr = sys.stderr redirect = ux.noiseControl(options) self.assertEqual(sys.stdout.name, stdout.name) self.assertEqual(sys.stderr.name, stderr.name)", "test_path = os.path.join( os.path.dirname(os.getcwd()), 'hendrix/test/testproject' ) options['pythonpath'] = test_path ux.exposeProject(options)", "self.DEFAULTS options['dev'] = True self.assertFalse(options['reload']) self.assertFalse(options['loud']) options = ux.devFriendly(options) self.assertTrue(options['reload'])", "ux.djangoVsWsgi(options) self.assertEqual(options['settings'], user_settings) def test_wsgi_correct_wsgi_path_works(self): wsgi_dot_path = 'hendrix.test.wsgi' options =", "options available \"\"\" deploy = self.wsgiDeploy() expected_keys = self.DEFAULTS.keys() actual_keys", "ux.djangoVsWsgi, self.DEFAULTS) def test_user_settings_overrides_system_variable(self): django_settings = 'django.inanity' with patch('hendrix.ux.findSettingsModule') as", "test_user_settings_overrides_system_variable(self): django_settings = 'django.inanity' with patch('hendrix.ux.findSettingsModule') as findSettingsMod: findSettingsMod.return_value =", "options['pythonpath'] = test_path ux.exposeProject(options) self.assertTrue(test_path in sys.path) sys.path = [p", "= True options['traceback'] = True stdout = sys.stdout stderr =", "the complete set of options available \"\"\" deploy = self.wsgiDeploy()", "'' self.devnull = open(os.devnull, 'w') self.args_list = ['hx', 'start'] self.patcher", "super(TestMain, self).setUp() self.DEFAULTS = hx_options() os.environ['DJANGO_SETTINGS_MODULE'] = '' self.devnull =", "patch('hendrix.ux.findSettingsModule') self.patcher.start() def tearDown(self): super(TestMain, self).tearDown() self.devnull.close() self.patcher.stop() def test_settings_from_system_variable(self):", "options) def test_cwd_exposure(self): cwd = os.getcwd() _path = sys.path sys.path", "test_path = '/if/u/have/this/path/you/suck' options['pythonpath'] = test_path self.assertRaises(IOError, ux.exposeProject, options) def", 
"'--settings', TEST_SETTINGS] class Process(object): def poll(self): return 0 with patch('time.sleep'):", "has the complete set of options available \"\"\" deploy =", "ux.exposeProject(options) self.assertTrue(test_path in sys.path) sys.path = [p for p in", "self.DEFAULTS) def test_user_settings_overrides_system_variable(self): django_settings = 'django.inanity' with patch('hendrix.ux.findSettingsModule') as findSettingsMod:", "\"\"\" deploy = self.wsgiDeploy() expected_keys = self.DEFAULTS.keys() actual_keys = deploy.options.keys()", "as popen: popen.return_value = Process() ux.main() self.assertTrue(popen.called) self.assertTrue('--settings' in popen.call_args[0][0])", "ux.djangoVsWsgi, options) def test_cwd_exposure(self): cwd = os.getcwd() _path = sys.path", "import patch class TestMain(HendrixTestCase): def setUp(self): super(TestMain, self).setUp() self.DEFAULTS =", "stderr.name) self.assertEqual(redirect, None) def test_noise_control_traceback(self): options = self.DEFAULTS options['quiet'] =", "import HendrixTestCase, TEST_SETTINGS from hendrix.contrib import SettingsError from hendrix.options import", "sys from . import HendrixTestCase, TEST_SETTINGS from hendrix.contrib import SettingsError", "self.assertTrue(test_path in sys.path) sys.path = [p for p in sys.path", "sys.path if p != test_path] def test_shitty_pythonpath(self): options = self.DEFAULTS", "wsgi_dot_path}) self.assertRaises(ImportError, ux.djangoVsWsgi, options) def test_cwd_exposure(self): cwd = os.getcwd() _path", "= open(os.devnull, 'w') self.args_list = ['hx', 'start'] self.patcher = patch('hendrix.ux.findSettingsModule')", "'w') self.args_list = ['hx', 'start'] self.patcher = patch('hendrix.ux.findSettingsModule') self.patcher.start() def", "in sys.path) ux.exposeProject(self.DEFAULTS) self.assertTrue(cwd in sys.path) def test_pythonpath(self): options =", "self.args_list = ['hx', 'start'] self.patcher = patch('hendrix.ux.findSettingsModule') self.patcher.start() def tearDown(self):", "= [p for p in sys.path if p != test_path]", "patch class TestMain(HendrixTestCase): def setUp(self): super(TestMain, self).setUp() self.DEFAULTS = hx_options()", "self.assertRaises(IOError, ux.exposeProject, options) def test_dev_friendly_options(self): options = self.DEFAULTS options['dev'] =", "import sys from . 
import HendrixTestCase, TEST_SETTINGS from hendrix.contrib import", "options = ux.djangoVsWsgi(options) self.assertEqual(options['settings'], django_settings) def test_settings_wsgi_absense(self): with patch('hendrix.ux.findSettingsModule') as", "options['pythonpath'] = test_path self.assertRaises(IOError, ux.exposeProject, options) def test_dev_friendly_options(self): options =", "= ['hx', 'start'] self.patcher = patch('hendrix.ux.findSettingsModule') self.patcher.start() def tearDown(self): super(TestMain,", "sys.path = [p for p in sys.path if p !=", "django_settings options = self.DEFAULTS self.assertEqual(options['settings'], '') options = ux.djangoVsWsgi(options) self.assertEqual(options['settings'],", "= ux.djangoVsWsgi(options) self.assertEqual(options['settings'], django_settings) def test_settings_wsgi_absense(self): with patch('hendrix.ux.findSettingsModule') as findSettingsMod:", "= hx_options() os.environ['DJANGO_SETTINGS_MODULE'] = '' self.devnull = open(os.devnull, 'w') self.args_list", "options = self.DEFAULTS options['dev'] = True self.assertFalse(options['reload']) self.assertFalse(options['loud']) options =", "= 'django.inanity' with patch('hendrix.ux.findSettingsModule') as findSettingsMod: findSettingsMod.return_value = django_settings options", "self.DEFAULTS options['quiet'] = True options['daemonize'] = True stdout = sys.stdout", "ensure that HendrixDeploy.options also has the complete set of options", "TestMain(HendrixTestCase): def setUp(self): super(TestMain, self).setUp() self.DEFAULTS = hx_options() os.environ['DJANGO_SETTINGS_MODULE'] =", "self.assertEqual(sys.stdout.name, stdout.name) self.assertEqual(sys.stderr.name, stderr.name) self.assertEqual(redirect, None) def test_noise_control_traceback(self): options =", "self.assertEqual(redirect, None) def test_main_with_daemonize(self): sys.argv = self.args_list + ['-d', '--settings',", "cwd] self.assertTrue(cwd not in sys.path) ux.exposeProject(self.DEFAULTS) self.assertTrue(cwd in sys.path) def", "= test_path ux.exposeProject(options) self.assertTrue(test_path in sys.path) sys.path = [p for", "self.assertTrue(popen.called) self.assertTrue('--settings' in popen.call_args[0][0]) sys.argv = [] def test_options_structure(self): \"\"\"", "options = self.DEFAULTS options.update({'wsgi': wsgi_dot_path}) self.assertRaises(ImportError, ux.djangoVsWsgi, options) def test_cwd_exposure(self):", "from hendrix.contrib import SettingsError from hendrix.options import options as hx_options", "if p != cwd] self.assertTrue(cwd not in sys.path) ux.exposeProject(self.DEFAULTS) self.assertTrue(cwd", "user_settings self.assertEqual(options['settings'], user_settings) options = ux.djangoVsWsgi(options) self.assertEqual(options['settings'], user_settings) def test_wsgi_correct_wsgi_path_works(self):", "to ensure that HendrixDeploy.options also has the complete set of", "self.assertTrue(cwd in sys.path) def test_pythonpath(self): options = self.DEFAULTS test_path =", "= test_path self.assertRaises(IOError, ux.exposeProject, options) def test_dev_friendly_options(self): options = self.DEFAULTS", "self.DEFAULTS test_path = os.path.join( os.path.dirname(os.getcwd()), 'hendrix/test/testproject' ) options['pythonpath'] = test_path", "TEST_SETTINGS] class Process(object): def poll(self): return 0 with patch('time.sleep'): with", "options = self.DEFAULTS test_path = '/if/u/have/this/path/you/suck' options['pythonpath'] = test_path self.assertRaises(IOError,", "= [p for p in _path if p != cwd]", "options['quiet'] = True options['daemonize'] = 
True stdout = sys.stdout stderr", "HendrixDeploy.options also has the complete set of options available \"\"\"", "patch('time.sleep'): with patch('subprocess.Popen') as popen: popen.return_value = Process() ux.main() self.assertTrue(popen.called)", "stdout = sys.stdout stderr = sys.stderr redirect = ux.noiseControl(options) self.assertEqual(sys.stdout.name,", "[p for p in sys.path if p != test_path] def", "with patch('subprocess.Popen') as popen: popen.return_value = Process() ux.main() self.assertTrue(popen.called) self.assertTrue('--settings'", "sys.path) ux.exposeProject(self.DEFAULTS) self.assertTrue(cwd in sys.path) def test_pythonpath(self): options = self.DEFAULTS", "options['quiet'] = True options['daemonize'] = True options['traceback'] = True stdout", "test to ensure that HendrixDeploy.options also has the complete set", "= self.DEFAULTS test_path = os.path.join( os.path.dirname(os.getcwd()), 'hendrix/test/testproject' ) options['pythonpath'] =" ]
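The core trick in test_main_with_daemonize above is worth isolating: patch subprocess.Popen so the daemonize code path can be asserted against without ever forking a real child. A minimal, self-contained sketch of that pattern follows; launch() and FakeProcess are hypothetical stand-ins for hendrix's ux.main() and the spawned process, not hendrix APIs.

import subprocess

from mock import patch  # on Python 3, the stdlib equivalent is unittest.mock


def launch(settings):
    # Hypothetical stand-in for ux.main()'s daemonize branch: it re-invokes
    # the CLI in a child process, passing the settings module through.
    return subprocess.Popen(['hx', 'start', '--settings', settings])


class FakeProcess(object):
    # Mimics just enough of Popen's interface for the assertions below.
    def poll(self):
        return 0  # report that the "child" exited cleanly


with patch('subprocess.Popen') as popen:
    popen.return_value = FakeProcess()
    proc = launch('myproject.settings')
    # No real process was started; the mock recorded the argv instead.
    assert popen.called
    assert '--settings' in popen.call_args[0][0]
    assert proc.poll() == 0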
"""
The MIT License (MIT)

Copyright (c) 2015-2021 Rapptz

Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""

from __future__ import annotations

from typing import Optional, TYPE_CHECKING, Dict, TypedDict, Union, List, Literal

from .snowflake import Snowflake
from .components import Component, SelectOption
from .embed import Embed
from .channel import ChannelType, Channel
from .member import Member
from .role import Role
from .user import User

if TYPE_CHECKING:
    from .message import AllowedMentions, Message


ApplicationCommandType = Literal[1, 2, 3]


class ApplicationCommand(TypedDict):
    id: Snowflake
    application_id: Snowflake
    name: str
    description: str
    options: Optional[List[ApplicationCommandOption]]
    type: Optional[ApplicationCommandType]


ApplicationCommandOptionType = Literal[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]


class ApplicationCommandOption(TypedDict):
    type: ApplicationCommandOptionType
    name: str
    description: str
    required: bool
    choices: Optional[List[ApplicationCommandOptionChoice]]
    options: Optional[List[ApplicationCommandOption]]


class ApplicationCommandOptionChoice(TypedDict):
    name: str
    value: Union[str, int]


ApplicationCommandPermissionType = Literal[1, 2]


class ApplicationCommandPermissions(TypedDict):
    id: Snowflake
    type: ApplicationCommandPermissionType
    permission: bool


class BaseGuildApplicationCommandPermissions(TypedDict):
    permissions: List[ApplicationCommandPermissions]


class PartialGuildApplicationCommandPermissions(BaseGuildApplicationCommandPermissions):
    id: Snowflake


class GuildApplicationCommandPermissions(PartialGuildApplicationCommandPermissions):
    application_id: Snowflake
    guild_id: Snowflake


InteractionType = Literal[1, 2, 3]


class _ApplicationCommandInteractionDataOption(TypedDict):
    name: str


class _ApplicationCommandInteractionDataOptionSubcommand(_ApplicationCommandInteractionDataOption):
    type: Literal[1, 2]
    options: List[ApplicationCommandInteractionDataOption]


class _ApplicationCommandInteractionDataOptionString(_ApplicationCommandInteractionDataOption):
    type: Literal[3]
    value: str


class _ApplicationCommandInteractionDataOptionInteger(_ApplicationCommandInteractionDataOption):
    type: Literal[4]
    value: int


class _ApplicationCommandInteractionDataOptionBoolean(_ApplicationCommandInteractionDataOption):
    type: Literal[5]
    value: bool


class _ApplicationCommandInteractionDataOptionSnowflake(_ApplicationCommandInteractionDataOption):
    type: Literal[6, 7, 8, 9]
    value: Snowflake


class _ApplicationCommandInteractionDataOptionNumber(_ApplicationCommandInteractionDataOption):
    type: Literal[10]
    value: float


ApplicationCommandInteractionDataOption = Union[
    _ApplicationCommandInteractionDataOptionString,
    _ApplicationCommandInteractionDataOptionInteger,
    _ApplicationCommandInteractionDataOptionSubcommand,
    _ApplicationCommandInteractionDataOptionBoolean,
    _ApplicationCommandInteractionDataOptionSnowflake,
    _ApplicationCommandInteractionDataOptionNumber,
]


class ApplicationCommandResolvedPartialChannel(TypedDict):
    id: Snowflake
    type: ChannelType
    permissions: str
    name: str


class ApplicationCommandInteractionDataResolved(TypedDict, total=False):
    users: Dict[Snowflake, User]
    members: Dict[Snowflake, Member]
    roles: Dict[Snowflake, Role]
    channels: Dict[Snowflake, ApplicationCommandResolvedPartialChannel]


# Note: this TypedDict shadows the Union alias of the same name defined above.
class ApplicationCommandInteractionDataOption(TypedDict):
    name: str
    type: int
    value: Optional[str]  # Optional[ApplicationCommandOptionType]
    options: Optional[ApplicationCommandInteractionDataOption]
    focused: Optional[bool]
    components: Optional[List[ApplicationCommandInteractionDataOption]]


class _InteractionDataOptional(TypedDict, total=False):
    resolved: Dict[str, dict]
    options: List[ApplicationCommandInteractionDataOption]
    custom_id: str
    component_type: int
    values: List[str]
    target_id: Snowflake
    components: List[ApplicationCommandInteractionDataOption]


class InteractionData(_InteractionDataOptional):
    id: Snowflake
    name: str
    type: ApplicationCommandType


class InteractionResolved(TypedDict):
    users: List[Union[User, Member]]
    members: List[Member]
    roles: List[Role]
    channels: List[Channel]
    messages: List[Message]


class _InteractionOptional(TypedDict, total=False):
    data: InteractionData
    guild_id: Snowflake
    channel_id: Snowflake
    member: Member
    user: User
    message: Message
    guild_locale: str


class Interaction(_InteractionOptional):
    id: Snowflake
    application_id: Snowflake
    type: InteractionType
    token: str
    version: int
    resolved: InteractionResolved
    locale: str


class InteractionApplicationCommandCallbackData(TypedDict, total=False):
    tts: bool
    content: str
    embeds: List[Embed]
    allowed_mentions: AllowedMentions
    flags: int
    components: List[Component]


InteractionResponseType = Literal[1, 4, 5, 6, 7]


class _InteractionResponseOptional(TypedDict, total=False):
    data: InteractionApplicationCommandCallbackData


class InteractionResponse(_InteractionResponseOptional):
    type: InteractionResponseType


class MessageInteraction(TypedDict):
    id: Snowflake
    type: InteractionType
    name: str
    user: User


class _EditApplicationCommandOptional(TypedDict, total=False):
    description: str
    options: Optional[List[ApplicationCommandOption]]
    type: ApplicationCommandType


class EditApplicationCommand(_EditApplicationCommandOptional):
    name: str
    default_permission: bool
portions of", "person obtaining a copy of this software and associated documentation", "ApplicationCommandPermissionType permission: bool class BaseGuildApplicationCommandPermissions(TypedDict): permissions: List[ApplicationCommandPermissions] class PartialGuildApplicationCommandPermissions(BaseGuildApplicationCommandPermissions): id:", "required: bool choices: Optional[List[ApplicationCommandOptionChoice]] options: Optional[List[ApplicationCommandOption]] class ApplicationCommandOptionChoice(TypedDict): name: str", "users: Dict[Snowflake, User] members: Dict[Snowflake, Member] roles: Dict[Snowflake, Role] channels:", "FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN", "class _InteractionOptional(TypedDict, total=False): data: InteractionData guild_id: Snowflake channel_id: Snowflake member:", "and this permission notice shall be included in all copies", "flags: int components: List[Component] InteractionResponseType = Literal[1, 4, 5, 6,", "Union[ _ApplicationCommandInteractionDataOptionString, _ApplicationCommandInteractionDataOptionInteger, _ApplicationCommandInteractionDataOptionSubcommand, _ApplicationCommandInteractionDataOptionBoolean, _ApplicationCommandInteractionDataOptionSnowflake, _ApplicationCommandInteractionDataOptionNumber, ] class ApplicationCommandResolvedPartialChannel(TypedDict):", "AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT", ".channel import ChannelType, Channel from .member import Member from .role", "str description: str options: Optional[List[ApplicationCommandOption]] type: Optional[ApplicationCommandType] ApplicationCommandOptionType = Literal[1,", "class MessageInteraction(TypedDict): id: Snowflake type: InteractionType name: str user: User", "List[Member] roles: List[Role] channels: List[Channel] messages: List[Message] class _InteractionOptional(TypedDict, total=False):", "OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE", "List[Message] class _InteractionOptional(TypedDict, total=False): data: InteractionData guild_id: Snowflake channel_id: Snowflake", "int values: List[str] target_id: Snowflake components: List[ApplicationCommandInteractionDataOption] class InteractionData(_InteractionDataOptional): id:", "free of charge, to any person obtaining a copy of", "Member user: User message: Message guild_locale: str class Interaction(_InteractionOptional): id:", "IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE", "options: Optional[List[ApplicationCommandOption]] type: ApplicationCommandType class EditApplicationCommand(_EditApplicationCommandOptional): name: str default_permission: bool", "IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,", "List[ApplicationCommandInteractionDataOption] class _ApplicationCommandInteractionDataOptionString(_ApplicationCommandInteractionDataOption): type: Literal[3] value: str class _ApplicationCommandInteractionDataOptionInteger(_ApplicationCommandInteractionDataOption): type:", "Snowflake name: str description: str options: Optional[List[ApplicationCommandOption]] type: Optional[ApplicationCommandType] ApplicationCommandOptionType", "str description: str required: bool choices: Optional[List[ApplicationCommandOptionChoice]] options: Optional[List[ApplicationCommandOption]] class", "OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT", "the Software is furnished to do so, subject to the", "Software, and to permit persons to whom the Software is", "8, 9, 10] class ApplicationCommandOption(TypedDict): type: ApplicationCommandOptionType name: str description:", "permission: bool 
class BaseGuildApplicationCommandPermissions(TypedDict): permissions: List[ApplicationCommandPermissions] class PartialGuildApplicationCommandPermissions(BaseGuildApplicationCommandPermissions): id: Snowflake", "SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.", "Role from .user import User if TYPE_CHECKING: from .message import", "ApplicationCommandType = Literal[1, 2, 3] class ApplicationCommand(TypedDict): id: Snowflake application_id:", "_ApplicationCommandInteractionDataOptionNumber, ] class ApplicationCommandResolvedPartialChannel(TypedDict): id: Snowflake type: ChannelType permissions: str", "InteractionData guild_id: Snowflake channel_id: Snowflake member: Member user: User message:", "rights to use, copy, modify, merge, publish, distribute, sublicense, and/or", "Snowflake type: ApplicationCommandPermissionType permission: bool class BaseGuildApplicationCommandPermissions(TypedDict): permissions: List[ApplicationCommandPermissions] class", "_ApplicationCommandInteractionDataOptionString(_ApplicationCommandInteractionDataOption): type: Literal[3] value: str class _ApplicationCommandInteractionDataOptionInteger(_ApplicationCommandInteractionDataOption): type: Literal[4] value:", "MIT License (MIT) Copyright (c) 2015-2021 Rapptz Permission is hereby", "License (MIT) Copyright (c) 2015-2021 Rapptz Permission is hereby granted,", "6, 7, 8, 9, 10] class ApplicationCommandOption(TypedDict): type: ApplicationCommandOptionType name:", "InteractionApplicationCommandCallbackData class InteractionResponse(_InteractionResponseOptional): type: InteractionResponseType class MessageInteraction(TypedDict): id: Snowflake type:", "documentation files (the \"Software\"), to deal in the Software without", "without restriction, including without limitation the rights to use, copy,", "from .user import User if TYPE_CHECKING: from .message import AllowedMentions,", "str embeds: List[Embed] allowed_mentions: AllowedMentions flags: int components: List[Component] InteractionResponseType", "TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION", "components: Optional[List[ApplicationCommandInteractionDataOption]] class _InteractionDataOptional(TypedDict, total=False): resolved: Dict[str, dict] options: List[ApplicationCommandInteractionDataOption]", "_InteractionDataOptional(TypedDict, total=False): resolved: Dict[str, dict] options: List[ApplicationCommandInteractionDataOption] custom_id: str component_type:", "value: str class _ApplicationCommandInteractionDataOptionInteger(_ApplicationCommandInteractionDataOption): type: Literal[4] value: int class _ApplicationCommandInteractionDataOptionBoolean(_ApplicationCommandInteractionDataOption):", "Interaction(_InteractionOptional): id: Snowflake application_id: Snowflake type: InteractionType token: str version:", "COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER", "Snowflake type: ChannelType permissions: str name: str class ApplicationCommandInteractionDataResolved(TypedDict, total=False):", "int resolved: InteractionResolved locale: str class InteractionApplicationCommandCallbackData(TypedDict, total=False): tts: bool", "_ApplicationCommandInteractionDataOptionInteger, _ApplicationCommandInteractionDataOptionSubcommand, _ApplicationCommandInteractionDataOptionBoolean, _ApplicationCommandInteractionDataOptionSnowflake, _ApplicationCommandInteractionDataOptionNumber, ] class ApplicationCommandResolvedPartialChannel(TypedDict): id: Snowflake", ".snowflake import Snowflake from .components import Component, SelectOption from .embed", "9] 
value: Snowflake class _ApplicationCommandInteractionDataOptionNumber(_ApplicationCommandInteractionDataOption): type: Literal[10] value: float ApplicationCommandInteractionDataOption", "NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE", "use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies", "values: List[str] target_id: Snowflake components: List[ApplicationCommandInteractionDataOption] class InteractionData(_InteractionDataOptional): id: Snowflake", "Optional[bool] components: Optional[List[ApplicationCommandInteractionDataOption]] class _InteractionDataOptional(TypedDict, total=False): resolved: Dict[str, dict] options:", "OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR", "str class ApplicationCommandInteractionDataResolved(TypedDict, total=False): users: Dict[Snowflake, User] members: Dict[Snowflake, Member]", "options: List[ApplicationCommandInteractionDataOption] class _ApplicationCommandInteractionDataOptionString(_ApplicationCommandInteractionDataOption): type: Literal[3] value: str class _ApplicationCommandInteractionDataOptionInteger(_ApplicationCommandInteractionDataOption):", "granted, free of charge, to any person obtaining a copy", "options: Optional[List[ApplicationCommandOption]] class ApplicationCommandOptionChoice(TypedDict): name: str value: Union[str, int] ApplicationCommandPermissionType", "Snowflake name: str type: ApplicationCommandType class InteractionResolved(TypedDict): users: List[Union[User, Member]]", "class InteractionResolved(TypedDict): users: List[Union[User, Member]] members: List[Member] roles: List[Role] channels:", "application_id: Snowflake guild_id: Snowflake InteractionType = Literal[1, 2, 3] class", "name: str description: str required: bool choices: Optional[List[ApplicationCommandOptionChoice]] options: Optional[List[ApplicationCommandOption]]", "of charge, to any person obtaining a copy of this", "PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS", "ApplicationCommand(TypedDict): id: Snowflake application_id: Snowflake name: str description: str options:", "type: Literal[1, 2] options: List[ApplicationCommandInteractionDataOption] class _ApplicationCommandInteractionDataOptionString(_ApplicationCommandInteractionDataOption): type: Literal[3] value:", "Permission is hereby granted, free of charge, to any person", "resolved: InteractionResolved locale: str class InteractionApplicationCommandCallbackData(TypedDict, total=False): tts: bool content:", "ApplicationCommandOptionChoice(TypedDict): name: str value: Union[str, int] ApplicationCommandPermissionType = Literal[1, 2]", "The above copyright notice and this permission notice shall be", "\"\"\" from __future__ import annotations from typing import Optional, TYPE_CHECKING,", "name: str description: str options: Optional[List[ApplicationCommandOption]] type: Optional[ApplicationCommandType] ApplicationCommandOptionType =", "Snowflake class _ApplicationCommandInteractionDataOptionNumber(_ApplicationCommandInteractionDataOption): type: Literal[10] value: float ApplicationCommandInteractionDataOption = Union[", "Rapptz Permission is hereby granted, free of charge, to any", "ChannelType, Channel from .member import Member from .role import Role", "= Literal[1, 2, 3, 4, 5, 6, 7, 8, 9,", "User if TYPE_CHECKING: from .message import AllowedMentions, Message ApplicationCommandType =", "Optional[List[ApplicationCommandOption]] type: Optional[ApplicationCommandType] ApplicationCommandOptionType = Literal[1, 2, 3, 4, 5,", ".user import User if TYPE_CHECKING: from .message import AllowedMentions, Message", "associated documentation files (the \"Software\"), to deal in the Software", "ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION", "THE USE OR OTHER DEALINGS IN THE SOFTWARE. \"\"\" from", "AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES", "2] class ApplicationCommandPermissions(TypedDict): id: Snowflake type: ApplicationCommandPermissionType permission: bool class", "class _ApplicationCommandInteractionDataOptionSubcommand(_ApplicationCommandInteractionDataOption): type: Literal[1, 2] options: List[ApplicationCommandInteractionDataOption] class _ApplicationCommandInteractionDataOptionString(_ApplicationCommandInteractionDataOption): type:", "WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING" ]
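
# ---------------------------------------------------------------------------
# Illustrative example, not part of the original module: a payload shaped
# like the ApplicationCommandOption / ApplicationCommandOptionChoice
# TypedDicts above. All values are invented; only the keys and their types
# come from the definitions in this file.
# ---------------------------------------------------------------------------
_example_option: ApplicationCommandOption = {
    'type': 3,  # 3 == STRING in ApplicationCommandOptionType
    'name': 'query',
    'description': 'Term to search for',
    'required': True,
    'choices': [{'name': 'cats', 'value': 'cats'}],
    'options': None,
}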
[ "text } async def addr_sign(self, activeid): \"\"\"位置签到\"\"\" params = {", "* from message import server_chan_send class AutoSign(object): def __init__(self, username,", "签到课程, 签到时间, 签到状态 sign_msg = { 'name': d['classname'], 'date': resp['date'],", "url = 'https://pan-yz.chaoxing.com/api/token/uservalid' async with self.session.request( method='GET', url=url ) as", "latitude, 'longitude': longitude, 'fid': '', 'appType': '15', 'ifTiJiao': '1', 'objectId':", "} async with self.session.request( method='POST', url=url, params=param, data=files ) as", "'w+') as f: f.write(\"{}\") with open(ACTIVEID_FILE_PATH, 'r') as f: data", "{ 'courseId': courseid, 'jclassId': classid } async with self.session.request(method='GET', url=\"https://mobilelearn.chaoxing.com/widget/pcpick/stu/index\",", ") if resp: # 签到课程, 签到时间, 签到状态 sign_msg = {", "import random import asyncio from typing import Optional, List, Dict", "as f: try: # 读取文件 data = json.load(f) if data[activeid]:", "'gzip, deflate', 'Accept-Language': 'zh-CN,zh;q=0.9', 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9', 'User-Agent': 'Mozilla/5.0 (Windows NT", "cookies with open(COOKIES_FILE_PATH, 'w') as f2: json.dump(data, f2) async def", "in course_list: res.append((course.attrs['courseid'], course.attrs['clazzid'], course.find_next('span', class_=\"course-name\").text)) print('课程列表: ', res) return", "= ClientSession(headers=self.headers) self.username = username self.password = password self.schoolid =", "activeid, 'classId': classid, 'courseId': courseid } async with self.session.request(method='GET', url='https://mobilelearn.chaoxing.com/widget/sign/pcStuSignController/preSign',", "text)[0] if \"签到成功\" not in title: # 网页标题不含签到成功,则为拍照签到 return self.tphoto_sign(activeid)", "= True json.dump(data, f2) async def get_all_classid(self) -> list: \"\"\"获取课程主页中所有课程的classid和courseid\"\"\"", "获取所有课程activeid和签到类型 for i in classid_courseId: coroutine = self.get_activeid(i[1], i[0], i[2])", "with self.session.request( method='GET', url=\"https://mobilelearn.chaoxing.com/widget/sign/pcStuSignController/preSign\", params=params, verify_ssl=False ) as resp: text", "= json.load(f) data[self.username] = cookies with open(COOKIES_FILE_PATH, 'w') as f2:", "activeid } async with self.session.request( method='GET', url=\"https://mobilelearn.chaoxing.com/widget/sign/pcStuSignController/signIn\", params=params, verify_ssl=False )", "'status': text } async def tphoto_sign(self, activeid, uid): \"\"\"拍照签到\"\"\" objectId", "\"\"\"开始所有签到任务\"\"\" tasks = [] res = [] await self.set_cookies() #", "ClientSession from aiohttp.cookiejar import SimpleCookie from lxml import etree from", "'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9', 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like", "'name': d['classname'], 'date': resp['date'], 'status': resp['status'] } res.append(sign_msg) if '失败'", "await self.check_cookies() if not cookie: # 无效则重新登录,并保存cookies status, text, cookie", "with open(ACTIVEID_FILE_PATH, 'w') as f2: data[activeid] = True json.dump(data, f2)", "[] re_rule = r'([\\d]+),2' params = { 'courseId': courseid, 'jclassId':", "self.tphoto_sign(activeid) else: return await self.general_sign(classid, courseid, activeid) async def send_sign_result(self,", "async def check_cookies(self) -> Optional[SimpleCookie]: \"\"\"检测json文件内是否存有cookies,有则检测,无则登录\"\"\" if \"cookies.json\" 
not in", "json.load(f) cookies = data[self.username] except Exception: return False # 检测cookies是否有效", "await self.general_sign(classid, courseid, activeid) async def send_sign_result(self, results: List[Dict]): \"\"\"", "login_status == 1000: cookies = self.dict_from_simple_cookie(cookie) self.save_cookies(cookies) else: return 1001", "False # 检测cookies是否有效 async with self.session.request(method='GET', url='http://mooc1-1.chaoxing.com/api/workTestPendingNew', allow_redirects=False, cookies=cookies) as", "class AutoSign(object): def __init__(self, username, password, schoolid=None, enc=None): \"\"\"初始化就进行登录\"\"\" self.headers", "BeautifulSoup(text, \"lxml\") course_list = soup.find_all( 'li', class_=\"course\") for course in", "in os.listdir(COOKIES_PATH): with open(COOKIES_FILE_PATH, 'w+') as f: f.write(\"{}\") with open(COOKIES_FILE_PATH,", "cookie = await self.login() login_status = await self.check_login_status(status, text) if", "'uid': '', 'clientip': clientip, 'latitude': latitude, 'longitude': longitude, 'fid': '',", "os.mkdir(IMAGE_PATH) all_img = 0 if len(all_img) == 0: return \"a5d588f7bce1994323c348982332e470\"", "return await self.hand_sign(classid, courseid, activeid) elif \"二维码\" in sign_type: return", "f: # json文件有无账号cookies, 没有,则直接返回假 try: data = json.load(f) cookies =", "} async with self.session.request( method='GET', url=\"https://mobilelearn.chaoxing.com/widget/sign/pcStuSignController/signIn\", params=params, verify_ssl=False ) as", "def login(self): \"\"\" 登录并返回响应 \"\"\" params = { 'name': self.username,", "\"\"\"保存已成功签到的activeid\"\"\" activeid += self.username if \"activeid.json\" not in os.listdir(ACTIVEID_PATH): with", "{ 'puid': uid, '_token': token } async with self.session.request( method='POST',", "is None else schoolid self.enc = '' if enc is", "latitude, 'longitude': longitude, 'fid': '', 'appType': '15', 'ifTiJiao': '1' }", "'-1', 'fid': '', 'appType': '15' } async with self.session.request('GET', 'https://mobilelearn.chaoxing.com/pptSign/stuSignajax',", "self.get_activeid(i[1], i[0], i[2]) tasks.append(coroutine) results: List[Dict] = await asyncio.gather(*tasks) for", "schoolid=None, enc=None): \"\"\"初始化就进行登录\"\"\" self.headers = { 'Accept-Encoding': 'gzip, deflate', 'Accept-Language':", "cookies async def login(self): \"\"\" 登录并返回响应 \"\"\" params = {", "self.save_cookies(cookies) else: return 1001 else: self.session.cookie_jar.update_cookies(cookie) return 1000 def dict_from_simple_cookie(self,", "text } async def tphoto_sign(self, activeid, uid): \"\"\"拍照签到\"\"\" objectId =", "# 读取文件 data = json.load(f) if data[activeid]: return True except", "\"\"\"拍照签到\"\"\" objectId = await self.upload_img(uid) params = { 'name': '',", "longitude, 'fid': '', 'appType': '15', 'ifTiJiao': '1', 'objectId': objectId }", "self.login() login_status = await self.check_login_status(status, text) if login_status == 1000:", "if resp.status != 200: print(\"cookie失效\") return None else: print(\"cookie有效!\") return", "= self.dict_from_simple_cookie(cookie) self.save_cookies(cookies) else: return 1001 else: self.session.cookie_jar.update_cookies(cookie) return 1000", "self.session.cookies.get_dict()['UID'] url = 'https://pan-yz.chaoxing.com/upload' files = {'file': open(img, 'rb')} uid", "else: return 1001 else: self.session.cookie_jar.update_cookies(cookie) return 1000 def dict_from_simple_cookie(self, cookies)", "async def set_cookies(self): \"\"\"设置cookies\"\"\" cookie = await self.check_cookies() if not", "activeid): \"\"\"保存已成功签到的activeid\"\"\" activeid += self.username if 
\"activeid.json\" not in os.listdir(ACTIVEID_PATH):", "class_=\"course\") for course in course_list: res.append((course.attrs['courseid'], course.attrs['clazzid'], course.find_next('span', class_=\"course-name\").text)) print('课程列表:", "text) if login_status == 1000: cookies = self.dict_from_simple_cookie(cookie) self.save_cookies(cookies) else:", "classid, 'activeId': activeid } async with self.session.request( method='GET', url=\"https://mobilelearn.chaoxing.com/widget/sign/pcStuSignController/signIn\", params=params,", "print('课程列表: ', res) return res async def get_sign_type(self, classid, courseid,", "if status == 403: return 1002 data = json.loads(text) if", "'fid': '39037', 'courseId': courseid } async with self.session.request( method='GET', url=\"https://mobilelearn.chaoxing.com/widget/sign/pcStuSignController/preSign\",", "= 'https://pan-yz.chaoxing.com/upload' files = {'file': open(img, 'rb')} uid = self.session.cookie_jar.filter_cookies('').get('UID').value", "= await resp.text() return { 'date': time.strftime(\"%m-%d %H:%M\", time.localtime()), 'status':", "self.upload_img(uid) params = { 'name': '', 'activeId': activeid, 'address': '中国',", "} return s async def qcode_sign(self, activeid): \"\"\"二维码签到\"\"\" params =", "import re import time import json import random import asyncio", "self.enc = '' if enc is None else enc async", "text): if status == 403: return 1002 data = json.loads(text)", "await self.check_login_status(status, text) if login_status == 1000: cookies = self.dict_from_simple_cookie(cookie)", "not in os.listdir(ACTIVEID_PATH): with open(ACTIVEID_FILE_PATH, 'w+') as f: f.write(\"{}\") with", "= len(res) if n: d = {'num': n, 'class': {}}", "self.session.request('GET', 'https://mobilelearn.chaoxing.com/pptSign/stuSignajax', params=params, allow_redirects=False) as resp: text = await resp.text()", "token_dict['_token'] async def upload_img(self, uid): \"\"\"上传图片\"\"\" # 从图片文件夹内随机选择一张图片 try: all_img", "data[activeid] = True json.dump(data, f2) async def get_all_classid(self) -> list:", "= await resp.text() cookies = resp.cookies return status, text, cookies", "'date': resp['date'], 'status': resp['status'] } res.append(sign_msg) if '失败' in resp['status']:", "if n: d = {'num': n, 'class': {}} for i", "\"拍照\" in sign_type: return await self.tphoto_sign(activeid) else: return await self.general_sign(classid,", "{ 'Accept-Encoding': 'gzip, deflate', 'Accept-Language': 'zh-CN,zh;q=0.9', 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9', 'User-Agent': 'Mozilla/5.0", "self.schoolid, 'verify': 0 } async with self.session.request(method='GET', url='https://passport2.chaoxing.com/api/login', params=params) as", "def check_cookies(self) -> Optional[SimpleCookie]: \"\"\"检测json文件内是否存有cookies,有则检测,无则登录\"\"\" if \"cookies.json\" not in os.listdir(COOKIES_PATH):", "\"a5d588f7bce1994323c348982332e470\" else: img = IMAGE_PATH + random.choice(all_img) # uid =", "return cookies async def login(self): \"\"\" 登录并返回响应 \"\"\" params =", "= {'file': open(img, 'rb')} uid = self.session.cookie_jar.filter_cookies('').get('UID').value token = await", "== 403: return 1002 data = json.loads(text) if data['result']: return", "for key, value in cookies.items(): result[key] = value.value return result", "'courseId': courseid, 'jclassId': classid } async with self.session.request(method='GET', url=\"https://mobilelearn.chaoxing.com/widget/pcpick/stu/index\", verify_ssl=False,", 
"url='http://mooc1-2.chaoxing.com/visit/interaction') as resp: text = await resp.text() soup = BeautifulSoup(text,", "{ 'name': '', 'activeId': activeid, 'address': '中国', 'uid': '', 'clientip':", "'status': title } return s async def qcode_sign(self, activeid): \"\"\"二维码签到\"\"\"", "status, text, cookies def check_activeid(self, activeid): \"\"\"检测activeid是否存在,不存在则添加\"\"\" activeid += self.username", "import os import re import time import json import random", "as resp: text = await resp.text() token_dict = json.loads(text) return", "f: f.write(\"{}\") with open(COOKIES_FILE_PATH, 'r') as f: # json文件有无账号cookies, 没有,则直接返回假", "\"\"\" result = {} for key, value in cookies.items(): result[key]", "async def login(self): \"\"\" 登录并返回响应 \"\"\" params = { 'name':", "i[2]) tasks.append(coroutine) results: List[Dict] = await asyncio.gather(*tasks) for r in", "activeid): \"\"\"检测activeid是否存在,不存在则添加\"\"\" activeid += self.username if \"activeid.json\" not in os.listdir(ACTIVEID_PATH):", "courseid, activeid): \"\"\"普通签到\"\"\" params = { 'activeId': activeid, 'classId': classid,", "with open(ACTIVEID_FILE_PATH, 'r') as f: data = json.load(f) with open(ACTIVEID_FILE_PATH,", "results: if r is None: continue for d in r['class'].values():", "self.get_all_classid() # 获取所有课程activeid和签到类型 for i in classid_courseId: coroutine = self.get_activeid(i[1],", "= await self.get_all_classid() # 获取所有课程activeid和签到类型 for i in classid_courseId: coroutine", "resp.status != 200: print(\"cookie失效\") return None else: print(\"cookie有效!\") return cookies", "text = await resp.text() cookies = resp.cookies return status, text,", "'', 'latitude': '-1', 'longitude': '-1', 'fid': '', 'appType': '15' }", "{ 'courseId': courseid, 'classId': classid, 'activeId': activeid } async with", "time.localtime()), 'status': text } async def tphoto_sign(self, activeid, uid): \"\"\"拍照签到\"\"\"", "'' if schoolid is None else schoolid self.enc = ''", "json.load(f) data[self.username] = cookies with open(COOKIES_FILE_PATH, 'w') as f2: json.dump(data,", "} async with self.session.request(method='GET', url='https://mobilelearn.chaoxing.com/widget/sign/pcStuSignController/preSign', params=params) as resp: text =", "'status': resp['status'] } res.append(sign_msg) if '失败' in resp['status']: continue #", "not in os.listdir(COOKIES_PATH): with open(COOKIES_FILE_PATH, 'w+') as f: f.write(\"{}\") with", "n, 'class': {}} for i in range(n): if not self.check_activeid(res[i][0]):", "List, Dict from aiohttp import ClientSession from aiohttp.cookiejar import SimpleCookie", "else enc async def check_login_status(self, status, text): if status ==", "await resp.text() h = etree.HTML(text) sign_type = h.xpath('//div[@class=\"location\"]/span/text()') return sign_type", "return self.tphoto_sign(activeid) else: s = { 'date': time.strftime(\"%m-%d %H:%M\", time.localtime()),", "return 1002 data = json.loads(text) if data['result']: return 1000 #", "# 如果出错,则表示没有此activeid return False def save_activeid(self, activeid): \"\"\"保存已成功签到的activeid\"\"\" activeid +=", "True json.dump(data, f2) async def get_all_classid(self) -> list: \"\"\"获取课程主页中所有课程的classid和courseid\"\"\" res", "= self.get_activeid(i[1], i[0], i[2]) tasks.append(coroutine) results: List[Dict] = await asyncio.gather(*tasks)", "re import time import json import random import asyncio from", "result def save_cookies(self, cookies: dict): \"\"\"保存cookies\"\"\" with open(COOKIES_FILE_PATH, \"r\") as", "token = await self.get_token() param = { 'puid': uid, '_token':", "{}} for i in range(n): if not 
self.check_activeid(res[i][0]): d['class'][i] =", "open(ACTIVEID_FILE_PATH, 'w') as f2: data[activeid] = True json.dump(data, f2) async", "= '' if schoolid is None else schoolid self.enc =", "activeid: continue sign_type = await self.get_sign_type(classid, courseid, activeid[0]) res.append((activeid[0], sign_type[0]))", "= await resp.text() soup = BeautifulSoup(text, \"lxml\") course_list = soup.find_all(", "url=\"https://mobilelearn.chaoxing.com/widget/sign/pcStuSignController/signIn\", params=params, verify_ssl=False ) as resp: text = await resp.text()", "import * from message import server_chan_send class AutoSign(object): def __init__(self,", "await self.set_cookies() # 获取所有课程的classid和course_id classid_courseId = await self.get_all_classid() # 获取所有课程activeid和签到类型", "text = await resp.text() soup = BeautifulSoup(text, \"lxml\") course_list =", "f2: data[activeid] = True json.dump(data, f2) async def get_all_classid(self) ->", "'', 'appType': '15', 'ifTiJiao': '1' } async with self.session.request( method=\"GET\",", "resp.text() res_dict = json.loads(text) return res_dict['objectId'] async def send_sign_request(self, classid,", "d in r['class'].values(): resp = await self.send_sign_request( d['classid'], d['courseid'], d['activeid'],", "classname): \"\"\"访问任务面板获取课程的活动id\"\"\" res = [] re_rule = r'([\\d]+),2' params =", "i[0], i[2]) tasks.append(coroutine) results: List[Dict] = await asyncio.gather(*tasks) for r", "uid = self.session.cookie_jar.filter_cookies('').get('UID').value token = await self.get_token() param = {", "open(COOKIES_FILE_PATH, \"r\") as f: data = json.load(f) data[self.username] = cookies", "\"位置\" in sign_type: return await self.addr_sign(activeid) elif \"拍照\" in sign_type:", "as f: # json文件有无账号cookies, 没有,则直接返回假 try: data = json.load(f) cookies", "url='https://mobilelearn.chaoxing.com/widget/sign/pcStuSignController/preSign', params=params) as resp: text = await resp.text() h =", "token_dict = json.loads(text) return token_dict['_token'] async def upload_img(self, uid): \"\"\"上传图片\"\"\"", "{ 'activeId': activeid, 'classId': classid, 'fid': '39037', 'courseId': courseid }", "open(ACTIVEID_FILE_PATH, 'w+') as f: f.write(\"{}\") with open(ACTIVEID_FILE_PATH, 'r') as f:", "with self.session.request(method='GET', url='http://mooc1-2.chaoxing.com/visit/interaction') as resp: text = await resp.text() soup", "resp: # 签到课程, 签到时间, 签到状态 sign_msg = { 'name': d['classname'],", "= [] res = [] await self.set_cookies() # 获取所有课程的classid和course_id classid_courseId", "'longitude': longitude, 'fid': '', 'appType': '15', 'ifTiJiao': '1', 'objectId': objectId", "etree from bs4 import BeautifulSoup from config import * from", "coroutine = self.get_activeid(i[1], i[0], i[2]) tasks.append(coroutine) results: List[Dict] = await", "tasks.append(coroutine) results: List[Dict] = await asyncio.gather(*tasks) for r in results:", "\"activeid.json\" not in os.listdir(ACTIVEID_PATH): with open(ACTIVEID_FILE_PATH, 'w+') as f: f.write(\"{}\")", "cookies.items(): result[key] = value.value return result def save_cookies(self, cookies: dict):", "courseid, 'activeid': res[i][0], 'classname': classname, 'sign_type': res[i][1] } return d", "IMAGE_PATH + random.choice(all_img) # uid = self.session.cookies.get_dict()['UID'] url = 'https://pan-yz.chaoxing.com/upload'", "self.session.cookie_jar.filter_cookies('').get('UID').value token = await self.get_token() param = { 'puid': uid,", "except BaseException: # 如果出错,则表示没有此activeid return False def save_activeid(self, activeid): \"\"\"保存已成功签到的activeid\"\"\"", 
"len(res) if n: d = {'num': n, 'class': {}} for", "def __init__(self, username, password, schoolid=None, enc=None): \"\"\"初始化就进行登录\"\"\" self.headers = {", "= json.loads(text) return token_dict['_token'] async def upload_img(self, uid): \"\"\"上传图片\"\"\" #", "', res) return res async def get_sign_type(self, classid, courseid, activeid):", "method='POST', url=url, params=param, data=files ) as resp: text = await", "course.find_next('span', class_=\"course-name\").text)) print('课程列表: ', res) return res async def get_sign_type(self,", "'r') as f: # json文件有无账号cookies, 没有,则直接返回假 try: data = json.load(f)", "f: try: # 读取文件 data = json.load(f) if data[activeid]: return", "\"\"\" 发送签到结果 \"\"\" await server_chan_send(results, self.session) async def start_sign_task(self): \"\"\"开始所有签到任务\"\"\"", "return res async def get_sign_type(self, classid, courseid, activeid): \"\"\"获取签到类型\"\"\" params", "d['classid'], d['courseid'], d['activeid'], d['sign_type'] ) if resp: # 签到课程, 签到时间,", "self.schoolid = '' if schoolid is None else schoolid self.enc", "sign_type = await self.get_sign_type(classid, courseid, activeid[0]) res.append((activeid[0], sign_type[0])) n =", "except Exception as e: os.mkdir(IMAGE_PATH) all_img = 0 if len(all_img)", "from bs4 import BeautifulSoup from config import * from message", "else schoolid self.enc = '' if enc is None else", "!= 200: print(\"cookie失效\") return None else: print(\"cookie有效!\") return cookies async", "from message import server_chan_send class AutoSign(object): def __init__(self, username, password,", "= json.loads(text) return res_dict['objectId'] async def send_sign_request(self, classid, courseid, activeid,", "resp['status']: continue # 签到成功后,新增activeid self.save_activeid(d['activeid']) return res async def close_session(self):", "as f: data = json.load(f) data[self.username] = cookies with open(COOKIES_FILE_PATH,", "async def qcode_sign(self, activeid): \"\"\"二维码签到\"\"\" params = { 'enc': self.enc,", "resp: text = await resp.text() token_dict = json.loads(text) return token_dict['_token']", "网页标题不含签到成功,则为拍照签到 return self.tphoto_sign(activeid) else: s = { 'date': time.strftime(\"%m-%d %H:%M\",", "'classid': classid, 'courseid': courseid, 'activeid': res[i][0], 'classname': classname, 'sign_type': res[i][1]", "Exception as e: os.mkdir(IMAGE_PATH) all_img = 0 if len(all_img) ==", "typing import Optional, List, Dict from aiohttp import ClientSession from", "{ 'activeId': activeid, 'classId': classid, 'courseId': courseid } async with", "url='http://mooc1-1.chaoxing.com/api/workTestPendingNew', allow_redirects=False, cookies=cookies) as resp: if resp.status != 200: print(\"cookie失效\")", "-> Optional[SimpleCookie]: \"\"\"检测json文件内是否存有cookies,有则检测,无则登录\"\"\" if \"cookies.json\" not in os.listdir(COOKIES_PATH): with open(COOKIES_FILE_PATH,", "lxml import etree from bs4 import BeautifulSoup from config import", "= await self.send_sign_request( d['classid'], d['courseid'], d['activeid'], d['sign_type'] ) if resp:", "params = { 'activeId': activeid, 'classId': classid, 'fid': '39037', 'courseId':", "return d async def general_sign(self, classid, courseid, activeid): \"\"\"普通签到\"\"\" params", "\"\"\" 登录并返回响应 \"\"\" params = { 'name': self.username, 'pwd': <PASSWORD>,", "\"\"\"普通签到\"\"\" params = { 'activeId': activeid, 'classId': classid, 'fid': '39037',", "in cookies.items(): result[key] = value.value return result def save_cookies(self, cookies:", "os.listdir(IMAGE_PATH) except Exception as e: os.mkdir(IMAGE_PATH) all_img = 0 if", "async def check_login_status(self, 
status, text): if status == 403: return", "import asyncio from typing import Optional, List, Dict from aiohttp", "with open(COOKIES_FILE_PATH, 'w') as f2: json.dump(data, f2) async def check_cookies(self)", "check_activeid(self, activeid): \"\"\"检测activeid是否存在,不存在则添加\"\"\" activeid += self.username if \"activeid.json\" not in", "async def send_sign_result(self, results: List[Dict]): \"\"\" 发送签到结果 \"\"\" await server_chan_send(results,", "self.send_sign_request( d['classid'], d['courseid'], d['activeid'], d['sign_type'] ) if resp: # 签到课程,", "time.strftime(\"%m-%d %H:%M\", time.localtime()), 'status': text } async def addr_sign(self, activeid):", "as f: f.write(\"{}\") with open(COOKIES_FILE_PATH, 'r') as f: # json文件有无账号cookies,", "json.dump(data, f2) async def get_all_classid(self) -> list: \"\"\"获取课程主页中所有课程的classid和courseid\"\"\" res =", "if login_status == 1000: cookies = self.dict_from_simple_cookie(cookie) self.save_cookies(cookies) else: return", "with open(ACTIVEID_FILE_PATH, 'r') as f: try: # 读取文件 data =", "files = {'file': open(img, 'rb')} uid = self.session.cookie_jar.filter_cookies('').get('UID').value token =", "10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.100 Safari/537.36', } self.session", "await resp.text() res_dict = json.loads(text) return res_dict['objectId'] async def send_sign_request(self,", "\"手势\" in sign_type: return await self.hand_sign(classid, courseid, activeid) elif \"二维码\"", "def dict_from_simple_cookie(self, cookies) -> dict: \"\"\" 从响应对象中抽取cookies \"\"\" result =", "return None else: print(\"cookie有效!\") return cookies async def login(self): \"\"\"", ") as resp: text = await resp.text() token_dict = json.loads(text)", "sign_type async def get_activeid(self, classid, courseid, classname): \"\"\"访问任务面板获取课程的活动id\"\"\" res =", "} async with self.session.request(method='GET', url=\"https://mobilelearn.chaoxing.com/widget/pcpick/stu/index\", verify_ssl=False, params=params) as resp: text", "= '' if enc is None else enc async def", "无效则重新登录,并保存cookies status, text, cookie = await self.login() login_status = await", "def tphoto_sign(self, activeid, uid): \"\"\"拍照签到\"\"\" objectId = await self.upload_img(uid) params", "courseid, activeid, sign_type): \"\"\"发送签到请求\"\"\" if \"手势\" in sign_type: return await", "courseid, classname): \"\"\"访问任务面板获取课程的活动id\"\"\" res = [] re_rule = r'([\\d]+),2' params", "self.session.request( method='GET', url=url ) as resp: text = await resp.text()", "courseid, activeid) async def send_sign_result(self, results: List[Dict]): \"\"\" 发送签到结果 \"\"\"", "data = json.loads(text) if data['result']: return 1000 # 登录成功 else:", "save_activeid(self, activeid): \"\"\"保存已成功签到的activeid\"\"\" activeid += self.username if \"activeid.json\" not in", "async with self.session.request(method='GET', url='https://mobilelearn.chaoxing.com/widget/sign/pcStuSignController/preSign', params=params) as resp: text = await", "data = json.load(f) if data[activeid]: return True except BaseException: #", "'39037', 'courseId': courseid } async with self.session.request( method='GET', url=\"https://mobilelearn.chaoxing.com/widget/sign/pcStuSignController/preSign\", params=params,", "'w+') as f: f.write(\"{}\") with open(ACTIVEID_FILE_PATH, 'r') as f: try:", "with self.session.request( method='GET', url=url ) as resp: text = await", "\"\"\"检测json文件内是否存有cookies,有则检测,无则登录\"\"\" if \"cookies.json\" not in os.listdir(COOKIES_PATH): with open(COOKIES_FILE_PATH, 'w+') as", "url=url, params=param, data=files ) as resp: text = await resp.text()", 
"check_login_status(self, status, text): if status == 403: return 1002 data", "course_list = soup.find_all( 'li', class_=\"course\") for course in course_list: res.append((course.attrs['courseid'],", "+= self.username if \"activeid.json\" not in os.listdir(ACTIVEID_PATH): with open(ACTIVEID_FILE_PATH, 'w+')", "'status': text } async def addr_sign(self, activeid): \"\"\"位置签到\"\"\" params =", "schoolid self.enc = '' if enc is None else enc", "import SimpleCookie from lxml import etree from bs4 import BeautifulSoup", "self.session.request(method='GET', url='https://mobilelearn.chaoxing.com/widget/sign/pcStuSignController/preSign', params=params) as resp: text = await resp.text() h", "in os.listdir(ACTIVEID_PATH): with open(ACTIVEID_FILE_PATH, 'w+') as f: f.write(\"{}\") with open(ACTIVEID_FILE_PATH,", "return False def save_activeid(self, activeid): \"\"\"保存已成功签到的activeid\"\"\" activeid += self.username if", "\"\"\"获取签到类型\"\"\" params = { 'activeId': activeid, 'classId': classid, 'courseId': courseid", "f.write(\"{}\") with open(COOKIES_FILE_PATH, 'r') as f: # json文件有无账号cookies, 没有,则直接返回假 try:", "method='GET', url=\"https://mobilelearn.chaoxing.com/widget/sign/pcStuSignController/preSign\", params=params, verify_ssl=False ) as resp: text = await", "res = [] async with self.session.request(method='GET', url='http://mooc1-2.chaoxing.com/visit/interaction') as resp: text", "# 获取所有课程的classid和course_id classid_courseId = await self.get_all_classid() # 获取所有课程activeid和签到类型 for i", "= [] async with self.session.request(method='GET', url='http://mooc1-2.chaoxing.com/visit/interaction') as resp: text =", "uid): \"\"\"上传图片\"\"\" # 从图片文件夹内随机选择一张图片 try: all_img = os.listdir(IMAGE_PATH) except Exception", "# json文件有无账号cookies, 没有,则直接返回假 try: data = json.load(f) cookies = data[self.username]", "} async with self.session.request('GET', 'https://mobilelearn.chaoxing.com/pptSign/stuSignajax', params=params, allow_redirects=False) as resp: text", "img = IMAGE_PATH + random.choice(all_img) # uid = self.session.cookies.get_dict()['UID'] url", "# 签到课程, 签到时间, 签到状态 sign_msg = { 'name': d['classname'], 'date':", "self.session.request( method='GET', url=\"https://mobilelearn.chaoxing.com/widget/sign/pcStuSignController/preSign\", params=params, verify_ssl=False ) as resp: text =", "cookies = resp.cookies return status, text, cookies def check_activeid(self, activeid):", "async with self.session.request( method=\"GET\", url=\"https://mobilelearn.chaoxing.com/pptSign/stuSignajax\", params=params ) as resp: text", "= { 'activeId': activeid, 'classId': classid, 'courseId': courseid } async", "= { 'name': '', 'activeId': activeid, 'address': '中国', 'uid': '',", "if len(all_img) == 0: return \"a5d588f7bce1994323c348982332e470\" else: img = IMAGE_PATH", "self.session) async def start_sign_task(self): \"\"\"开始所有签到任务\"\"\" tasks = [] res =", "activeid_list = h.xpath('//*[@id=\"startList\"]/div/div/@onclick') for activeid in activeid_list: activeid = re.findall(re_rule,", "start_sign_task(self): \"\"\"开始所有签到任务\"\"\" tasks = [] res = [] await self.set_cookies()", "self.check_cookies() if not cookie: # 无效则重新登录,并保存cookies status, text, cookie =", "= { 'courseId': courseid, 'classId': classid, 'activeId': activeid } async", "allow_redirects=False) as resp: text = await resp.text() return { 'date':", "async def general_sign(self, classid, courseid, activeid): \"\"\"普通签到\"\"\" params = {", "self.session.request( method=\"GET\", url=\"https://mobilelearn.chaoxing.com/pptSign/stuSignajax\", params=params ) as resp: text = await", 
"发送签到结果 \"\"\" await server_chan_send(results, self.session) async def start_sign_task(self): \"\"\"开始所有签到任务\"\"\" tasks", "dict): \"\"\"保存cookies\"\"\" with open(COOKIES_FILE_PATH, \"r\") as f: data = json.load(f)", "for activeid in activeid_list: activeid = re.findall(re_rule, activeid) if not", "qcode_sign(self, activeid): \"\"\"二维码签到\"\"\" params = { 'enc': self.enc, 'name': '',", "self.dict_from_simple_cookie(cookie) self.save_cookies(cookies) else: return 1001 else: self.session.cookie_jar.update_cookies(cookie) return 1000 def", "as resp: if resp.status != 200: print(\"cookie失效\") return None else:", "'中国', 'uid': '', 'clientip': clientip, 'latitude': latitude, 'longitude': longitude, 'fid':", "await resp.text() cookies = resp.cookies return status, text, cookies def", "if not self.check_activeid(res[i][0]): d['class'][i] = { 'classid': classid, 'courseid': courseid,", "title = re.findall('<title>(.*)</title>', text)[0] if \"签到成功\" not in title: #", ") as resp: text = await resp.text() return { 'date':", "= [] await self.set_cookies() # 获取所有课程的classid和course_id classid_courseId = await self.get_all_classid()", "verify_ssl=False ) as resp: text = await resp.text() title =", "resp: if resp.status != 200: print(\"cookie失效\") return None else: print(\"cookie有效!\")", "asyncio from typing import Optional, List, Dict from aiohttp import", "= etree.HTML(text) activeid_list = h.xpath('//*[@id=\"startList\"]/div/div/@onclick') for activeid in activeid_list: activeid", "self.session.request(method='GET', url='http://mooc1-1.chaoxing.com/api/workTestPendingNew', allow_redirects=False, cookies=cookies) as resp: if resp.status != 200:", "tasks = [] res = [] await self.set_cookies() # 获取所有课程的classid和course_id", "'', 'activeId': activeid, 'address': '中国', 'uid': '', 'clientip': clientip, 'latitude':", "if r is None: continue for d in r['class'].values(): resp", "elif \"二维码\" in sign_type: return await self.qcode_sign(activeid) elif \"位置\" in", "from aiohttp import ClientSession from aiohttp.cookiejar import SimpleCookie from lxml", "= data[self.username] except Exception: return False # 检测cookies是否有效 async with", "self.check_activeid(res[i][0]): d['class'][i] = { 'classid': classid, 'courseid': courseid, 'activeid': res[i][0],", "} async def tphoto_sign(self, activeid, uid): \"\"\"拍照签到\"\"\" objectId = await", "sign_type: return await self.addr_sign(activeid) elif \"拍照\" in sign_type: return await", "'classId': classid, 'courseId': courseid } async with self.session.request(method='GET', url='https://mobilelearn.chaoxing.com/widget/sign/pcStuSignController/preSign', params=params)", "resp.text() soup = BeautifulSoup(text, \"lxml\") course_list = soup.find_all( 'li', class_=\"course\")", "f: f.write(\"{}\") with open(ACTIVEID_FILE_PATH, 'r') as f: try: # 读取文件", "'r') as f: try: # 读取文件 data = json.load(f) if", "return await self.tphoto_sign(activeid) else: return await self.general_sign(classid, courseid, activeid) async", "'classId': classid, 'fid': '39037', 'courseId': courseid } async with self.session.request(", "= { 'activeId': activeid, 'classId': classid, 'fid': '39037', 'courseId': courseid", "resp: text = await resp.text() h = etree.HTML(text) activeid_list =", "method='GET', url=\"https://mobilelearn.chaoxing.com/widget/sign/pcStuSignController/signIn\", params=params, verify_ssl=False ) as resp: text = await", "async def send_sign_request(self, classid, courseid, activeid, sign_type): \"\"\"发送签到请求\"\"\" if \"手势\"", "resp: text = await resp.text() h = etree.HTML(text) sign_type =", "from 
typing import Optional, List, Dict from aiohttp import ClientSession", "= 'https://pan-yz.chaoxing.com/api/token/uservalid' async with self.session.request( method='GET', url=url ) as resp:", "= resp.cookies return status, text, cookies def check_activeid(self, activeid): \"\"\"检测activeid是否存在,不存在则添加\"\"\"", "random.choice(all_img) # uid = self.session.cookies.get_dict()['UID'] url = 'https://pan-yz.chaoxing.com/upload' files =", "url = 'https://pan-yz.chaoxing.com/upload' files = {'file': open(img, 'rb')} uid =", "title } return s async def hand_sign(self, classid, courseid, activeid):", "random import asyncio from typing import Optional, List, Dict from", "with self.session.request(method='GET', url='https://mobilelearn.chaoxing.com/widget/sign/pcStuSignController/preSign', params=params) as resp: text = await resp.text()", "activeid): \"\"\"手势签到\"\"\" params = { 'courseId': courseid, 'classId': classid, 'activeId':", "'rb')} uid = self.session.cookie_jar.filter_cookies('').get('UID').value token = await self.get_token() param =", "as e: os.mkdir(IMAGE_PATH) all_img = 0 if len(all_img) == 0:", "= await self.login() login_status = await self.check_login_status(status, text) if login_status", "objectId = await self.upload_img(uid) params = { 'name': '', 'activeId':", "open(img, 'rb')} uid = self.session.cookie_jar.filter_cookies('').get('UID').value token = await self.get_token() param", "result = {} for key, value in cookies.items(): result[key] =", "# 登录成功 else: return 1001 # 登录信息有误 async def set_cookies(self):", "'courseId': courseid, 'classId': classid, 'activeId': activeid } async with self.session.request(", "if enc is None else enc async def check_login_status(self, status,", "time.localtime()), 'status': text } async def get_token(self): \"\"\"获取上传文件所需参数token\"\"\" url =", "'schoolid': self.schoolid, 'verify': 0 } async with self.session.request(method='GET', url='https://passport2.chaoxing.com/api/login', params=params)", "as resp: text = await resp.text() title = re.findall('<title>(.*)</title>', text)[0]", "= etree.HTML(text) sign_type = h.xpath('//div[@class=\"location\"]/span/text()') return sign_type async def get_activeid(self,", "'fid': '', 'appType': '15', 'ifTiJiao': '1', 'objectId': objectId } async", "List[Dict] = await asyncio.gather(*tasks) for r in results: if r", "cookies def check_activeid(self, activeid): \"\"\"检测activeid是否存在,不存在则添加\"\"\" activeid += self.username if \"activeid.json\"", "'https://pan-yz.chaoxing.com/api/token/uservalid' async with self.session.request( method='GET', url=url ) as resp: text", "'Accept-Language': 'zh-CN,zh;q=0.9', 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9', 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64)", "from config import * from message import server_chan_send class AutoSign(object):", "courseid, activeid) elif \"二维码\" in sign_type: return await self.qcode_sign(activeid) elif", "Optional, List, Dict from aiohttp import ClientSession from aiohttp.cookiejar import", "= resp.status text = await resp.text() cookies = resp.cookies return", "cookie = await self.check_cookies() if not cookie: # 无效则重新登录,并保存cookies status,", "def upload_img(self, uid): \"\"\"上传图片\"\"\" # 从图片文件夹内随机选择一张图片 try: all_img = os.listdir(IMAGE_PATH)", "{ 'name': self.username, 'pwd': <PASSWORD>, 'schoolid': self.schoolid, 'verify': 0 }", "json.loads(text) return res_dict['objectId'] async def send_sign_request(self, classid, courseid, activeid, sign_type):", 
"self.tphoto_sign(activeid) else: s = { 'date': time.strftime(\"%m-%d %H:%M\", time.localtime()), 'status':", "in sign_type: return await self.hand_sign(classid, courseid, activeid) elif \"二维码\" in", "is None: continue for d in r['class'].values(): resp = await", "= { 'classid': classid, 'courseid': courseid, 'activeid': res[i][0], 'classname': classname,", "async def get_token(self): \"\"\"获取上传文件所需参数token\"\"\" url = 'https://pan-yz.chaoxing.com/api/token/uservalid' async with self.session.request(", "with open(ACTIVEID_FILE_PATH, 'w+') as f: f.write(\"{}\") with open(ACTIVEID_FILE_PATH, 'r') as", "BaseException: # 如果出错,则表示没有此activeid return False def save_activeid(self, activeid): \"\"\"保存已成功签到的activeid\"\"\" activeid", "= await self.get_token() param = { 'puid': uid, '_token': token", "params = { 'enc': self.enc, 'name': '', 'activeId': activeid, 'uid':", "def send_sign_result(self, results: List[Dict]): \"\"\" 发送签到结果 \"\"\" await server_chan_send(results, self.session)", "params = { 'name': self.username, 'pwd': <PASSWORD>, 'schoolid': self.schoolid, 'verify':", "= IMAGE_PATH + random.choice(all_img) # uid = self.session.cookies.get_dict()['UID'] url =", "await server_chan_send(results, self.session) async def start_sign_task(self): \"\"\"开始所有签到任务\"\"\" tasks = []", "Exception: return False # 检测cookies是否有效 async with self.session.request(method='GET', url='http://mooc1-1.chaoxing.com/api/workTestPendingNew', allow_redirects=False,", "d['sign_type'] ) if resp: # 签到课程, 签到时间, 签到状态 sign_msg =", "h.xpath('//div[@class=\"location\"]/span/text()') return sign_type async def get_activeid(self, classid, courseid, classname): \"\"\"访问任务面板获取课程的活动id\"\"\"", "'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9', 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML,", "with self.session.request('GET', 'https://mobilelearn.chaoxing.com/pptSign/stuSignajax', params=params, allow_redirects=False) as resp: text = await", "'class': {}} for i in range(n): if not self.check_activeid(res[i][0]): d['class'][i]", "not cookie: # 无效则重新登录,并保存cookies status, text, cookie = await self.login()", "courseid } async with self.session.request(method='GET', url='https://mobilelearn.chaoxing.com/widget/sign/pcStuSignController/preSign', params=params) as resp: text", "res = [] await self.set_cookies() # 获取所有课程的classid和course_id classid_courseId = await", "None else: print(\"cookie有效!\") return cookies async def login(self): \"\"\" 登录并返回响应", "async def get_activeid(self, classid, courseid, classname): \"\"\"访问任务面板获取课程的活动id\"\"\" res = []", "n: d = {'num': n, 'class': {}} for i in", "'enc': self.enc, 'name': '', 'activeId': activeid, 'uid': '', 'clientip': '',", "longitude, 'fid': '', 'appType': '15', 'ifTiJiao': '1' } async with", "all_img = 0 if len(all_img) == 0: return \"a5d588f7bce1994323c348982332e470\" else:", "try: data = json.load(f) cookies = data[self.username] except Exception: return", "# 检测cookies是否有效 async with self.session.request(method='GET', url='http://mooc1-1.chaoxing.com/api/workTestPendingNew', allow_redirects=False, cookies=cookies) as resp:", "= { 'courseId': courseid, 'jclassId': classid } async with self.session.request(method='GET',", "classid, courseid, activeid): \"\"\"普通签到\"\"\" params = { 'activeId': activeid, 'classId':", "\"\"\"设置cookies\"\"\" cookie = await self.check_cookies() if not cookie: # 无效则重新登录,并保存cookies", "f.write(\"{}\") with open(ACTIVEID_FILE_PATH, 'r') as f: data = 
json.load(f) with", "soup.find_all( 'li', class_=\"course\") for course in course_list: res.append((course.attrs['courseid'], course.attrs['clazzid'], course.find_next('span',", "general_sign(self, classid, courseid, activeid): \"\"\"普通签到\"\"\" params = { 'activeId': activeid,", "courseid } async with self.session.request( method='GET', url=\"https://mobilelearn.chaoxing.com/widget/sign/pcStuSignController/preSign\", params=params, verify_ssl=False )", "import ClientSession from aiohttp.cookiejar import SimpleCookie from lxml import etree", "self.headers = { 'Accept-Encoding': 'gzip, deflate', 'Accept-Language': 'zh-CN,zh;q=0.9', 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',", "config import * from message import server_chan_send class AutoSign(object): def", "with self.session.request( method='GET', url=\"https://mobilelearn.chaoxing.com/widget/sign/pcStuSignController/signIn\", params=params, verify_ssl=False ) as resp: text", "params = { 'name': '', 'activeId': activeid, 'address': '中国', 'uid':", "data[activeid]: return True except BaseException: # 如果出错,则表示没有此activeid return False def", "'ifTiJiao': '1', 'objectId': objectId } async with self.session.request( method=\"GET\", url=\"https://mobilelearn.chaoxing.com/pptSign/stuSignajax\",", "res async def get_sign_type(self, classid, courseid, activeid): \"\"\"获取签到类型\"\"\" params =", "'fid': '', 'appType': '15', 'ifTiJiao': '1' } async with self.session.request(", "'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko)", "activeid[0]) res.append((activeid[0], sign_type[0])) n = len(res) if n: d =", "= await self.check_cookies() if not cookie: # 无效则重新登录,并保存cookies status, text,", "classid, 'courseId': courseid } async with self.session.request(method='GET', url='https://mobilelearn.chaoxing.com/widget/sign/pcStuSignController/preSign', params=params) as", "= re.findall(re_rule, activeid) if not activeid: continue sign_type = await", "list: \"\"\"获取课程主页中所有课程的classid和courseid\"\"\" res = [] async with self.session.request(method='GET', url='http://mooc1-2.chaoxing.com/visit/interaction') as", "'r') as f: data = json.load(f) with open(ACTIVEID_FILE_PATH, 'w') as", "as resp: text = await resp.text() h = etree.HTML(text) sign_type", "def get_all_classid(self) -> list: \"\"\"获取课程主页中所有课程的classid和courseid\"\"\" res = [] async with", "%H:%M\", time.localtime()), 'status': title } return s async def hand_sign(self,", "in results: if r is None: continue for d in", "return await self.qcode_sign(activeid) elif \"位置\" in sign_type: return await self.addr_sign(activeid)", "get_sign_type(self, classid, courseid, activeid): \"\"\"获取签到类型\"\"\" params = { 'activeId': activeid,", "else: s = { 'date': time.strftime(\"%m-%d %H:%M\", time.localtime()), 'status': title", "= soup.find_all( 'li', class_=\"course\") for course in course_list: res.append((course.attrs['courseid'], course.attrs['clazzid'],", "resp: text = await resp.text() title = re.findall('<title>(.*)</title>', text) s", "'clientip': clientip, 'latitude': latitude, 'longitude': longitude, 'fid': '', 'appType': '15',", "'', 'appType': '15' } async with self.session.request('GET', 'https://mobilelearn.chaoxing.com/pptSign/stuSignajax', params=params, allow_redirects=False)", "None: continue for d in r['class'].values(): resp = await self.send_sign_request(", "in r['class'].values(): resp = await self.send_sign_request( d['classid'], d['courseid'], d['activeid'], 
d['sign_type']", "\"\"\"访问任务面板获取课程的活动id\"\"\" res = [] re_rule = r'([\\d]+),2' params = {", "classid, courseid, activeid): \"\"\"获取签到类型\"\"\" params = { 'activeId': activeid, 'classId':", "as resp: text = await resp.text() res_dict = json.loads(text) return", "d['class'][i] = { 'classid': classid, 'courseid': courseid, 'activeid': res[i][0], 'classname':", "key, value in cookies.items(): result[key] = value.value return result def", "return status, text, cookies def check_activeid(self, activeid): \"\"\"检测activeid是否存在,不存在则添加\"\"\" activeid +=", "in activeid_list: activeid = re.findall(re_rule, activeid) if not activeid: continue", "await resp.text() h = etree.HTML(text) activeid_list = h.xpath('//*[@id=\"startList\"]/div/div/@onclick') for activeid", "%H:%M\", time.localtime()), 'status': title } return s async def qcode_sign(self,", "time.localtime()), 'status': title } return s async def qcode_sign(self, activeid):", "def save_activeid(self, activeid): \"\"\"保存已成功签到的activeid\"\"\" activeid += self.username if \"activeid.json\" not", "elif \"位置\" in sign_type: return await self.addr_sign(activeid) elif \"拍照\" in", "'name': self.username, 'pwd': <PASSWORD>, 'schoolid': self.schoolid, 'verify': 0 } async", "res.append(sign_msg) if '失败' in resp['status']: continue # 签到成功后,新增activeid self.save_activeid(d['activeid']) return", "\"\"\"二维码签到\"\"\" params = { 'enc': self.enc, 'name': '', 'activeId': activeid,", "open(COOKIES_FILE_PATH, 'r') as f: # json文件有无账号cookies, 没有,则直接返回假 try: data =", "def send_sign_request(self, classid, courseid, activeid, sign_type): \"\"\"发送签到请求\"\"\" if \"手势\" in", "if resp: # 签到课程, 签到时间, 签到状态 sign_msg = { 'name':", "activeid, uid): \"\"\"拍照签到\"\"\" objectId = await self.upload_img(uid) params = {", "self.check_login_status(status, text) if login_status == 1000: cookies = self.dict_from_simple_cookie(cookie) self.save_cookies(cookies)", "await resp.text() token_dict = json.loads(text) return token_dict['_token'] async def upload_img(self,", "activeid, 'classId': classid, 'fid': '39037', 'courseId': courseid } async with", "results: List[Dict]): \"\"\" 发送签到结果 \"\"\" await server_chan_send(results, self.session) async def", "open(COOKIES_FILE_PATH, 'w') as f2: json.dump(data, f2) async def check_cookies(self) ->", "Gecko) Chrome/80.0.3987.100 Safari/537.36', } self.session = ClientSession(headers=self.headers) self.username = username", "'' if enc is None else enc async def check_login_status(self,", "cookies: dict): \"\"\"保存cookies\"\"\" with open(COOKIES_FILE_PATH, \"r\") as f: data =", "try: all_img = os.listdir(IMAGE_PATH) except Exception as e: os.mkdir(IMAGE_PATH) all_img", "courseid, 'classId': classid, 'activeId': activeid } async with self.session.request( method='GET',", "= json.loads(text) if data['result']: return 1000 # 登录成功 else: return", "= { 'name': self.username, 'pwd': <PASSWORD>, 'schoolid': self.schoolid, 'verify': 0", "activeid) if not activeid: continue sign_type = await self.get_sign_type(classid, courseid,", "r'([\\d]+),2' params = { 'courseId': courseid, 'jclassId': classid } async", "'pwd': <PASSWORD>, 'schoolid': self.schoolid, 'verify': 0 } async with self.session.request(method='GET',", "'fid': '', 'appType': '15' } async with self.session.request('GET', 'https://mobilelearn.chaoxing.com/pptSign/stuSignajax', params=params,", "ClientSession(headers=self.headers) self.username = username self.password = password self.schoolid = ''", "= cookies with open(COOKIES_FILE_PATH, 'w') as f2: json.dump(data, f2) async", "password, schoolid=None, 
enc=None): \"\"\"初始化就进行登录\"\"\" self.headers = { 'Accept-Encoding': 'gzip, deflate',", "with open(COOKIES_FILE_PATH, 'r') as f: # json文件有无账号cookies, 没有,则直接返回假 try: data", "data = json.load(f) data[self.username] = cookies with open(COOKIES_FILE_PATH, 'w') as", "async with self.session.request(method='GET', url='https://passport2.chaoxing.com/api/login', params=params) as resp: status = resp.status", "await self.get_all_classid() # 获取所有课程activeid和签到类型 for i in classid_courseId: coroutine =", "h = etree.HTML(text) activeid_list = h.xpath('//*[@id=\"startList\"]/div/div/@onclick') for activeid in activeid_list:", "登录成功 else: return 1001 # 登录信息有误 async def set_cookies(self): \"\"\"设置cookies\"\"\"", "courseid, activeid): \"\"\"获取签到类型\"\"\" params = { 'activeId': activeid, 'classId': classid,", "'courseId': courseid } async with self.session.request(method='GET', url='https://mobilelearn.chaoxing.com/widget/sign/pcStuSignController/preSign', params=params) as resp:", "d['activeid'], d['sign_type'] ) if resp: # 签到课程, 签到时间, 签到状态 sign_msg", "f2: json.dump(data, f2) async def check_cookies(self) -> Optional[SimpleCookie]: \"\"\"检测json文件内是否存有cookies,有则检测,无则登录\"\"\" if", "text, cookie = await self.login() login_status = await self.check_login_status(status, text)", "resp: text = await resp.text() soup = BeautifulSoup(text, \"lxml\") course_list", "Chrome/80.0.3987.100 Safari/537.36', } self.session = ClientSession(headers=self.headers) self.username = username self.password", "cookies=cookies) as resp: if resp.status != 200: print(\"cookie失效\") return None", "return sign_type async def get_activeid(self, classid, courseid, classname): \"\"\"访问任务面板获取课程的活动id\"\"\" res", "self.session.request( method='GET', url=\"https://mobilelearn.chaoxing.com/widget/sign/pcStuSignController/signIn\", params=params, verify_ssl=False ) as resp: text =", "sign_msg = { 'name': d['classname'], 'date': resp['date'], 'status': resp['status'] }", "else: return await self.general_sign(classid, courseid, activeid) async def send_sign_result(self, results:", "else: img = IMAGE_PATH + random.choice(all_img) # uid = self.session.cookies.get_dict()['UID']", "AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.100 Safari/537.36', } self.session = ClientSession(headers=self.headers)", "res.append((activeid[0], sign_type[0])) n = len(res) if n: d = {'num':", "# uid = self.session.cookies.get_dict()['UID'] url = 'https://pan-yz.chaoxing.com/upload' files = {'file':", "= await resp.text() title = re.findall('<title>(.*)</title>', text)[0] if \"签到成功\" not", "resp.text() h = etree.HTML(text) sign_type = h.xpath('//div[@class=\"location\"]/span/text()') return sign_type async", "\"\"\"发送签到请求\"\"\" if \"手势\" in sign_type: return await self.hand_sign(classid, courseid, activeid)", "{} for key, value in cookies.items(): result[key] = value.value return", "like Gecko) Chrome/80.0.3987.100 Safari/537.36', } self.session = ClientSession(headers=self.headers) self.username =", "await resp.text() title = re.findall('<title>(.*)</title>', text) s = { 'date':", "if \"activeid.json\" not in os.listdir(ACTIVEID_PATH): with open(ACTIVEID_FILE_PATH, 'w+') as f:", "import Optional, List, Dict from aiohttp import ClientSession from aiohttp.cookiejar", "await asyncio.gather(*tasks) for r in results: if r is None:", "'', 'clientip': '', 'useragent': '', 'latitude': '-1', 'longitude': '-1', 'fid':", "= [] re_rule = r'([\\d]+),2' params = { 'courseId': courseid,", "'w+') as f: f.write(\"{}\") with open(COOKIES_FILE_PATH, 'r') as f: #", "classid, 
'courseid': courseid, 'activeid': res[i][0], 'classname': classname, 'sign_type': res[i][1] }", "with open(COOKIES_FILE_PATH, \"r\") as f: data = json.load(f) data[self.username] =", "text) s = { 'date': time.strftime(\"%m-%d %H:%M\", time.localtime()), 'status': title", "self.set_cookies() # 获取所有课程的classid和course_id classid_courseId = await self.get_all_classid() # 获取所有课程activeid和签到类型 for", "title: # 网页标题不含签到成功,则为拍照签到 return self.tphoto_sign(activeid) else: s = { 'date':", "'zh-CN,zh;q=0.9', 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9', 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36", "None else enc async def check_login_status(self, status, text): if status", "if schoolid is None else schoolid self.enc = '' if", "class_=\"course-name\").text)) print('课程列表: ', res) return res async def get_sign_type(self, classid,", "coding: utf8 -*- import os import re import time import", "res[i][0], 'classname': classname, 'sign_type': res[i][1] } return d async def", "url=\"https://mobilelearn.chaoxing.com/widget/sign/pcStuSignController/preSign\", params=params, verify_ssl=False ) as resp: text = await resp.text()", "return s async def hand_sign(self, classid, courseid, activeid): \"\"\"手势签到\"\"\" params", "s async def hand_sign(self, classid, courseid, activeid): \"\"\"手势签到\"\"\" params =", "return token_dict['_token'] async def upload_img(self, uid): \"\"\"上传图片\"\"\" # 从图片文件夹内随机选择一张图片 try:", "param = { 'puid': uid, '_token': token } async with", "async with self.session.request( method='POST', url=url, params=param, data=files ) as resp:", "登录信息有误 async def set_cookies(self): \"\"\"设置cookies\"\"\" cookie = await self.check_cookies() if", "Optional[SimpleCookie]: \"\"\"检测json文件内是否存有cookies,有则检测,无则登录\"\"\" if \"cookies.json\" not in os.listdir(COOKIES_PATH): with open(COOKIES_FILE_PATH, 'w+')", "def check_activeid(self, activeid): \"\"\"检测activeid是否存在,不存在则添加\"\"\" activeid += self.username if \"activeid.json\" not", "len(all_img) == 0: return \"a5d588f7bce1994323c348982332e470\" else: img = IMAGE_PATH +", "async def upload_img(self, uid): \"\"\"上传图片\"\"\" # 从图片文件夹内随机选择一张图片 try: all_img =", "List[Dict]): \"\"\" 发送签到结果 \"\"\" await server_chan_send(results, self.session) async def start_sign_task(self):", "import json import random import asyncio from typing import Optional,", "'date': time.strftime(\"%m-%d %H:%M\", time.localtime()), 'status': text } async def addr_sign(self,", "json.load(f) if data[activeid]: return True except BaseException: # 如果出错,则表示没有此activeid return", "0 if len(all_img) == 0: return \"a5d588f7bce1994323c348982332e470\" else: img =", "= { 'name': d['classname'], 'date': resp['date'], 'status': resp['status'] } res.append(sign_msg)", "检测cookies是否有效 async with self.session.request(method='GET', url='http://mooc1-1.chaoxing.com/api/workTestPendingNew', allow_redirects=False, cookies=cookies) as resp: if", "dict: \"\"\" 从响应对象中抽取cookies \"\"\" result = {} for key, value", "await resp.text() return { 'date': time.strftime(\"%m-%d %H:%M\", time.localtime()), 'status': text", "text = await resp.text() h = etree.HTML(text) activeid_list = h.xpath('//*[@id=\"startList\"]/div/div/@onclick')", "set_cookies(self): \"\"\"设置cookies\"\"\" cookie = await self.check_cookies() if not cookie: #", "True except BaseException: # 如果出错,则表示没有此activeid return False def save_activeid(self, activeid):", "1001 # 登录信息有误 async def set_cookies(self): \"\"\"设置cookies\"\"\" cookie = await", "0 } 
async with self.session.request(method='GET', url='https://passport2.chaoxing.com/api/login', params=params) as resp: status", "(KHTML, like Gecko) Chrome/80.0.3987.100 Safari/537.36', } self.session = ClientSession(headers=self.headers) self.username", "登录并返回响应 \"\"\" params = { 'name': self.username, 'pwd': <PASSWORD>, 'schoolid':", "import BeautifulSoup from config import * from message import server_chan_send", "h.xpath('//*[@id=\"startList\"]/div/div/@onclick') for activeid in activeid_list: activeid = re.findall(re_rule, activeid) if", "deflate', 'Accept-Language': 'zh-CN,zh;q=0.9', 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9', 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0;", "for course in course_list: res.append((course.attrs['courseid'], course.attrs['clazzid'], course.find_next('span', class_=\"course-name\").text)) print('课程列表: ',", "# 网页标题不含签到成功,则为拍照签到 return self.tphoto_sign(activeid) else: s = { 'date': time.strftime(\"%m-%d", "text } async def get_token(self): \"\"\"获取上传文件所需参数token\"\"\" url = 'https://pan-yz.chaoxing.com/api/token/uservalid' async", "resp: status = resp.status text = await resp.text() cookies =", "'15', 'ifTiJiao': '1' } async with self.session.request( method=\"GET\", url=\"https://mobilelearn.chaoxing.com/pptSign/stuSignajax\", params=params", "== 0: return \"a5d588f7bce1994323c348982332e470\" else: img = IMAGE_PATH + random.choice(all_img)", "resp['status'] } res.append(sign_msg) if '失败' in resp['status']: continue # 签到成功后,新增activeid", "activeid = re.findall(re_rule, activeid) if not activeid: continue sign_type =", "= { 'date': time.strftime(\"%m-%d %H:%M\", time.localtime()), 'status': title } return", "self.enc, 'name': '', 'activeId': activeid, 'uid': '', 'clientip': '', 'useragent':", "os.listdir(COOKIES_PATH): with open(COOKIES_FILE_PATH, 'w+') as f: f.write(\"{}\") with open(COOKIES_FILE_PATH, 'r')", "for d in r['class'].values(): resp = await self.send_sign_request( d['classid'], d['courseid'],", "enc is None else enc async def check_login_status(self, status, text):", "} res.append(sign_msg) if '失败' in resp['status']: continue # 签到成功后,新增activeid self.save_activeid(d['activeid'])", "data = json.load(f) with open(ACTIVEID_FILE_PATH, 'w') as f2: data[activeid] =", "res) return res async def get_sign_type(self, classid, courseid, activeid): \"\"\"获取签到类型\"\"\"", "'date': time.strftime(\"%m-%d %H:%M\", time.localtime()), 'status': text } async def tphoto_sign(self,", "await self.get_token() param = { 'puid': uid, '_token': token }", "params=params ) as resp: text = await resp.text() return {", "with self.session.request( method='POST', url=url, params=param, data=files ) as resp: text", ") as resp: text = await resp.text() title = re.findall('<title>(.*)</title>',", "从图片文件夹内随机选择一张图片 try: all_img = os.listdir(IMAGE_PATH) except Exception as e: os.mkdir(IMAGE_PATH)", "= BeautifulSoup(text, \"lxml\") course_list = soup.find_all( 'li', class_=\"course\") for course", "sign_type: return await self.tphoto_sign(activeid) else: return await self.general_sign(classid, courseid, activeid)", "in classid_courseId: coroutine = self.get_activeid(i[1], i[0], i[2]) tasks.append(coroutine) results: List[Dict]", "resp.text() token_dict = json.loads(text) return token_dict['_token'] async def upload_img(self, uid):", "await self.hand_sign(classid, courseid, activeid) elif \"二维码\" in sign_type: return await", "return \"a5d588f7bce1994323c348982332e470\" else: img = IMAGE_PATH + 
random.choice(all_img) # uid", "'', 'useragent': '', 'latitude': '-1', 'longitude': '-1', 'fid': '', 'appType':", "__init__(self, username, password, schoolid=None, enc=None): \"\"\"初始化就进行登录\"\"\" self.headers = { 'Accept-Encoding':", "server_chan_send class AutoSign(object): def __init__(self, username, password, schoolid=None, enc=None): \"\"\"初始化就进行登录\"\"\"", "'w') as f2: json.dump(data, f2) async def check_cookies(self) -> Optional[SimpleCookie]:", "从响应对象中抽取cookies \"\"\" result = {} for key, value in cookies.items():", "print(\"cookie失效\") return None else: print(\"cookie有效!\") return cookies async def login(self):", "username, password, schoolid=None, enc=None): \"\"\"初始化就进行登录\"\"\" self.headers = { 'Accept-Encoding': 'gzip,", "json.loads(text) return token_dict['_token'] async def upload_img(self, uid): \"\"\"上传图片\"\"\" # 从图片文件夹内随机选择一张图片", "} self.session = ClientSession(headers=self.headers) self.username = username self.password = password", "1002 data = json.loads(text) if data['result']: return 1000 # 登录成功", "= value.value return result def save_cookies(self, cookies: dict): \"\"\"保存cookies\"\"\" with", "data[self.username] except Exception: return False # 检测cookies是否有效 async with self.session.request(method='GET',", "activeid): \"\"\"普通签到\"\"\" params = { 'activeId': activeid, 'classId': classid, 'fid':", "in sign_type: return await self.tphoto_sign(activeid) else: return await self.general_sign(classid, courseid,", "= await resp.text() h = etree.HTML(text) activeid_list = h.xpath('//*[@id=\"startList\"]/div/div/@onclick') for", "try: # 读取文件 data = json.load(f) if data[activeid]: return True", "= {} for key, value in cookies.items(): result[key] = value.value", "'classname': classname, 'sign_type': res[i][1] } return d async def general_sign(self,", "addr_sign(self, activeid): \"\"\"位置签到\"\"\" params = { 'name': '', 'activeId': activeid,", "'', 'clientip': clientip, 'latitude': latitude, 'longitude': longitude, 'fid': '', 'appType':", "'courseId': courseid } async with self.session.request( method='GET', url=\"https://mobilelearn.chaoxing.com/widget/sign/pcStuSignController/preSign\", params=params, verify_ssl=False", "classid_courseId: coroutine = self.get_activeid(i[1], i[0], i[2]) tasks.append(coroutine) results: List[Dict] =", "re_rule = r'([\\d]+),2' params = { 'courseId': courseid, 'jclassId': classid", "r is None: continue for d in r['class'].values(): resp =", "{ 'classid': classid, 'courseid': courseid, 'activeid': res[i][0], 'classname': classname, 'sign_type':", "res_dict = json.loads(text) return res_dict['objectId'] async def send_sign_request(self, classid, courseid,", "async with self.session.request( method='GET', url=\"https://mobilelearn.chaoxing.com/widget/sign/pcStuSignController/preSign\", params=params, verify_ssl=False ) as resp:", "'courseid': courseid, 'activeid': res[i][0], 'classname': classname, 'sign_type': res[i][1] } return", "f2) async def get_all_classid(self) -> list: \"\"\"获取课程主页中所有课程的classid和courseid\"\"\" res = []", "res[i][1] } return d async def general_sign(self, classid, courseid, activeid):", "'sign_type': res[i][1] } return d async def general_sign(self, classid, courseid,", "{'num': n, 'class': {}} for i in range(n): if not", "'objectId': objectId } async with self.session.request( method=\"GET\", url=\"https://mobilelearn.chaoxing.com/pptSign/stuSignajax\", params=params )", "= await resp.text() res_dict = json.loads(text) return res_dict['objectId'] async def", "elif \"拍照\" in sign_type: return await self.tphoto_sign(activeid) else: 
return await", "params = { 'activeId': activeid, 'classId': classid, 'courseId': courseid }", "'latitude': latitude, 'longitude': longitude, 'fid': '', 'appType': '15', 'ifTiJiao': '1'", "没有,则直接返回假 try: data = json.load(f) cookies = data[self.username] except Exception:", "as resp: text = await resp.text() title = re.findall('<title>(.*)</title>', text)", "as f: f.write(\"{}\") with open(ACTIVEID_FILE_PATH, 'r') as f: data =", "cookies = data[self.username] except Exception: return False # 检测cookies是否有效 async", "asyncio.gather(*tasks) for r in results: if r is None: continue", "if \"手势\" in sign_type: return await self.hand_sign(classid, courseid, activeid) elif", "await self.tphoto_sign(activeid) else: return await self.general_sign(classid, courseid, activeid) async def", "self.password = password self.schoolid = '' if schoolid is None", "activeid): \"\"\"二维码签到\"\"\" params = { 'enc': self.enc, 'name': '', 'activeId':", "self.session.request(method='GET', url='https://passport2.chaoxing.com/api/login', params=params) as resp: status = resp.status text =", "etree.HTML(text) sign_type = h.xpath('//div[@class=\"location\"]/span/text()') return sign_type async def get_activeid(self, classid,", "'activeid': res[i][0], 'classname': classname, 'sign_type': res[i][1] } return d async", "= self.session.cookie_jar.filter_cookies('').get('UID').value token = await self.get_token() param = { 'puid':", "{ 'name': d['classname'], 'date': resp['date'], 'status': resp['status'] } res.append(sign_msg) if", "} async with self.session.request( method=\"GET\", url=\"https://mobilelearn.chaoxing.com/pptSign/stuSignajax\", params=params ) as resp:", "enc async def check_login_status(self, status, text): if status == 403:", "def hand_sign(self, classid, courseid, activeid): \"\"\"手势签到\"\"\" params = { 'courseId':", "self.addr_sign(activeid) elif \"拍照\" in sign_type: return await self.tphoto_sign(activeid) else: return", "uid): \"\"\"拍照签到\"\"\" objectId = await self.upload_img(uid) params = { 'name':", "\"\"\"手势签到\"\"\" params = { 'courseId': courseid, 'classId': classid, 'activeId': activeid", "\"\"\"保存cookies\"\"\" with open(COOKIES_FILE_PATH, \"r\") as f: data = json.load(f) data[self.username]", "1000 def dict_from_simple_cookie(self, cookies) -> dict: \"\"\" 从响应对象中抽取cookies \"\"\" result", "<PASSWORD>, 'schoolid': self.schoolid, 'verify': 0 } async with self.session.request(method='GET', url='https://passport2.chaoxing.com/api/login',", "\"\"\"位置签到\"\"\" params = { 'name': '', 'activeId': activeid, 'address': '中国',", "hand_sign(self, classid, courseid, activeid): \"\"\"手势签到\"\"\" params = { 'courseId': courseid,", "'15' } async with self.session.request('GET', 'https://mobilelearn.chaoxing.com/pptSign/stuSignajax', params=params, allow_redirects=False) as resp:", "sign_type: return await self.qcode_sign(activeid) elif \"位置\" in sign_type: return await", "[] await self.set_cookies() # 获取所有课程的classid和course_id classid_courseId = await self.get_all_classid() #", "# 获取所有课程activeid和签到类型 for i in classid_courseId: coroutine = self.get_activeid(i[1], i[0],", "s async def qcode_sign(self, activeid): \"\"\"二维码签到\"\"\" params = { 'enc':", "courseid, activeid[0]) res.append((activeid[0], sign_type[0])) n = len(res) if n: d", "allow_redirects=False, cookies=cookies) as resp: if resp.status != 200: print(\"cookie失效\") return", "if data[activeid]: return True except BaseException: # 如果出错,则表示没有此activeid return False", "activeid in activeid_list: activeid = re.findall(re_rule, activeid) if not activeid:", "= await 
asyncio.gather(*tasks) for r in results: if r is", "save_cookies(self, cookies: dict): \"\"\"保存cookies\"\"\" with open(COOKIES_FILE_PATH, \"r\") as f: data", "403: return 1002 data = json.loads(text) if data['result']: return 1000", "r['class'].values(): resp = await self.send_sign_request( d['classid'], d['courseid'], d['activeid'], d['sign_type'] )", "async def get_sign_type(self, classid, courseid, activeid): \"\"\"获取签到类型\"\"\" params = {", "classid, courseid, activeid): \"\"\"手势签到\"\"\" params = { 'courseId': courseid, 'classId':", "classid_courseId = await self.get_all_classid() # 获取所有课程activeid和签到类型 for i in classid_courseId:", "course.attrs['clazzid'], course.find_next('span', class_=\"course-name\").text)) print('课程列表: ', res) return res async def", "self.hand_sign(classid, courseid, activeid) elif \"二维码\" in sign_type: return await self.qcode_sign(activeid)", "in resp['status']: continue # 签到成功后,新增activeid self.save_activeid(d['activeid']) return res async def", "\"\"\"获取上传文件所需参数token\"\"\" url = 'https://pan-yz.chaoxing.com/api/token/uservalid' async with self.session.request( method='GET', url=url )", "def addr_sign(self, activeid): \"\"\"位置签到\"\"\" params = { 'name': '', 'activeId':", "data['result']: return 1000 # 登录成功 else: return 1001 # 登录信息有误", "except Exception: return False # 检测cookies是否有效 async with self.session.request(method='GET', url='http://mooc1-1.chaoxing.com/api/workTestPendingNew',", "# 签到成功后,新增activeid self.save_activeid(d['activeid']) return res async def close_session(self): await self.session.close()", "async def addr_sign(self, activeid): \"\"\"位置签到\"\"\" params = { 'name': '',", "data=files ) as resp: text = await resp.text() res_dict =", "cookies) -> dict: \"\"\" 从响应对象中抽取cookies \"\"\" result = {} for", "-> list: \"\"\"获取课程主页中所有课程的classid和courseid\"\"\" res = [] async with self.session.request(method='GET', url='http://mooc1-2.chaoxing.com/visit/interaction')", "n = len(res) if n: d = {'num': n, 'class':", "'ifTiJiao': '1' } async with self.session.request( method=\"GET\", url=\"https://mobilelearn.chaoxing.com/pptSign/stuSignajax\", params=params )", "'longitude': longitude, 'fid': '', 'appType': '15', 'ifTiJiao': '1' } async", "classid, courseid, activeid, sign_type): \"\"\"发送签到请求\"\"\" if \"手势\" in sign_type: return", "from lxml import etree from bs4 import BeautifulSoup from config", "if not activeid: continue sign_type = await self.get_sign_type(classid, courseid, activeid[0])", "\"lxml\") course_list = soup.find_all( 'li', class_=\"course\") for course in course_list:", "await resp.text() title = re.findall('<title>(.*)</title>', text)[0] if \"签到成功\" not in", "self.username if \"activeid.json\" not in os.listdir(ACTIVEID_PATH): with open(ACTIVEID_FILE_PATH, 'w+') as", "login_status = await self.check_login_status(status, text) if login_status == 1000: cookies", "title = re.findall('<title>(.*)</title>', text) s = { 'date': time.strftime(\"%m-%d %H:%M\",", "\"二维码\" in sign_type: return await self.qcode_sign(activeid) elif \"位置\" in sign_type:", "classname, 'sign_type': res[i][1] } return d async def general_sign(self, classid,", "(Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.100 Safari/537.36',", "bs4 import BeautifulSoup from config import * from message import", "return 1000 def dict_from_simple_cookie(self, cookies) -> dict: \"\"\" 从响应对象中抽取cookies \"\"\"", "r in results: if r is None: continue for d", "await self.qcode_sign(activeid) elif \"位置\" in sign_type: return await self.addr_sign(activeid) elif", "def 
save_cookies(self, cookies: dict): \"\"\"保存cookies\"\"\" with open(COOKIES_FILE_PATH, \"r\") as f:", "1001 else: self.session.cookie_jar.update_cookies(cookie) return 1000 def dict_from_simple_cookie(self, cookies) -> dict:", "200: print(\"cookie失效\") return None else: print(\"cookie有效!\") return cookies async def", "async with self.session.request(method='GET', url=\"https://mobilelearn.chaoxing.com/widget/pcpick/stu/index\", verify_ssl=False, params=params) as resp: text =", "'classId': classid, 'activeId': activeid } async with self.session.request( method='GET', url=\"https://mobilelearn.chaoxing.com/widget/sign/pcStuSignController/signIn\",", "AutoSign(object): def __init__(self, username, password, schoolid=None, enc=None): \"\"\"初始化就进行登录\"\"\" self.headers =", "f: data = json.load(f) with open(ACTIVEID_FILE_PATH, 'w') as f2: data[activeid]", "return s async def qcode_sign(self, activeid): \"\"\"二维码签到\"\"\" params = {", "\"\"\" params = { 'name': self.username, 'pwd': <PASSWORD>, 'schoolid': self.schoolid,", "'1', 'objectId': objectId } async with self.session.request( method=\"GET\", url=\"https://mobilelearn.chaoxing.com/pptSign/stuSignajax\", params=params", "def set_cookies(self): \"\"\"设置cookies\"\"\" cookie = await self.check_cookies() if not cookie:", "Safari/537.36', } self.session = ClientSession(headers=self.headers) self.username = username self.password =", "cookie: # 无效则重新登录,并保存cookies status, text, cookie = await self.login() login_status", "utf8 -*- import os import re import time import json", "course_list: res.append((course.attrs['courseid'], course.attrs['clazzid'], course.find_next('span', class_=\"course-name\").text)) print('课程列表: ', res) return res", "\"r\") as f: data = json.load(f) data[self.username] = cookies with", "else: return 1001 # 登录信息有误 async def set_cookies(self): \"\"\"设置cookies\"\"\" cookie", "False def save_activeid(self, activeid): \"\"\"保存已成功签到的activeid\"\"\" activeid += self.username if \"activeid.json\"", "'w') as f2: data[activeid] = True json.dump(data, f2) async def", "schoolid is None else schoolid self.enc = '' if enc", "text, cookies def check_activeid(self, activeid): \"\"\"检测activeid是否存在,不存在则添加\"\"\" activeid += self.username if", "as f: f.write(\"{}\") with open(ACTIVEID_FILE_PATH, 'r') as f: try: #", "def general_sign(self, classid, courseid, activeid): \"\"\"普通签到\"\"\" params = { 'activeId':", "return await self.general_sign(classid, courseid, activeid) async def send_sign_result(self, results: List[Dict]):", "'date': time.strftime(\"%m-%d %H:%M\", time.localtime()), 'status': text } async def get_token(self):", "import time import json import random import asyncio from typing", "status = resp.status text = await resp.text() cookies = resp.cookies", "re.findall('<title>(.*)</title>', text)[0] if \"签到成功\" not in title: # 网页标题不含签到成功,则为拍照签到 return", "签到状态 sign_msg = { 'name': d['classname'], 'date': resp['date'], 'status': resp['status']", "url='https://passport2.chaoxing.com/api/login', params=params) as resp: status = resp.status text = await", "{ 'date': time.strftime(\"%m-%d %H:%M\", time.localtime()), 'status': text } async def", "activeid_list: activeid = re.findall(re_rule, activeid) if not activeid: continue sign_type", "self.qcode_sign(activeid) elif \"位置\" in sign_type: return await self.addr_sign(activeid) elif \"拍照\"", "in sign_type: return await self.addr_sign(activeid) elif \"拍照\" in sign_type: return", "res_dict['objectId'] async def send_sign_request(self, classid, courseid, activeid, sign_type): \"\"\"发送签到请求\"\"\" 
if", "re.findall('<title>(.*)</title>', text) s = { 'date': time.strftime(\"%m-%d %H:%M\", time.localtime()), 'status':", "\"\"\"上传图片\"\"\" # 从图片文件夹内随机选择一张图片 try: all_img = os.listdir(IMAGE_PATH) except Exception as", "i in classid_courseId: coroutine = self.get_activeid(i[1], i[0], i[2]) tasks.append(coroutine) results:", "activeid) async def send_sign_result(self, results: List[Dict]): \"\"\" 发送签到结果 \"\"\" await", "} return d async def general_sign(self, classid, courseid, activeid): \"\"\"普通签到\"\"\"", "time.strftime(\"%m-%d %H:%M\", time.localtime()), 'status': text } async def get_token(self): \"\"\"获取上传文件所需参数token\"\"\"", "activeid) elif \"二维码\" in sign_type: return await self.qcode_sign(activeid) elif \"位置\"", "return 1001 else: self.session.cookie_jar.update_cookies(cookie) return 1000 def dict_from_simple_cookie(self, cookies) ->", "params = { 'courseId': courseid, 'classId': classid, 'activeId': activeid }", "e: os.mkdir(IMAGE_PATH) all_img = 0 if len(all_img) == 0: return", "return res_dict['objectId'] async def send_sign_request(self, classid, courseid, activeid, sign_type): \"\"\"发送签到请求\"\"\"", "title } return s async def qcode_sign(self, activeid): \"\"\"二维码签到\"\"\" params", "await self.upload_img(uid) params = { 'name': '', 'activeId': activeid, 'address':", "h = etree.HTML(text) sign_type = h.xpath('//div[@class=\"location\"]/span/text()') return sign_type async def", "= await self.get_sign_type(classid, courseid, activeid[0]) res.append((activeid[0], sign_type[0])) n = len(res)", "if data['result']: return 1000 # 登录成功 else: return 1001 #", "} async with self.session.request(method='GET', url='https://passport2.chaoxing.com/api/login', params=params) as resp: status =", "d = {'num': n, 'class': {}} for i in range(n):", "= { 'Accept-Encoding': 'gzip, deflate', 'Accept-Language': 'zh-CN,zh;q=0.9', 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9', 'User-Agent':", "'useragent': '', 'latitude': '-1', 'longitude': '-1', 'fid': '', 'appType': '15'", "'latitude': '-1', 'longitude': '-1', 'fid': '', 'appType': '15' } async", "time.strftime(\"%m-%d %H:%M\", time.localtime()), 'status': text } async def tphoto_sign(self, activeid,", "def start_sign_task(self): \"\"\"开始所有签到任务\"\"\" tasks = [] res = [] await", "as resp: text = await resp.text() return { 'date': time.strftime(\"%m-%d", "resp: text = await resp.text() res_dict = json.loads(text) return res_dict['objectId']", "status, text, cookie = await self.login() login_status = await self.check_login_status(status,", "return { 'date': time.strftime(\"%m-%d %H:%M\", time.localtime()), 'status': text } async", "1000 # 登录成功 else: return 1001 # 登录信息有误 async def", "self.general_sign(classid, courseid, activeid) async def send_sign_result(self, results: List[Dict]): \"\"\" 发送签到结果", "self.session.cookie_jar.update_cookies(cookie) return 1000 def dict_from_simple_cookie(self, cookies) -> dict: \"\"\" 从响应对象中抽取cookies", "# 从图片文件夹内随机选择一张图片 try: all_img = os.listdir(IMAGE_PATH) except Exception as e:", "resp.status text = await resp.text() cookies = resp.cookies return status,", "读取文件 data = json.load(f) if data[activeid]: return True except BaseException:", "status, text): if status == 403: return 1002 data =", "= json.load(f) cookies = data[self.username] except Exception: return False #", "params = { 'courseId': courseid, 'jclassId': classid } async with", "classid, 'fid': '39037', 'courseId': courseid } async with self.session.request( method='GET',", 
"open(ACTIVEID_FILE_PATH, 'r') as f: try: # 读取文件 data = json.load(f)", "-*- coding: utf8 -*- import os import re import time", "course in course_list: res.append((course.attrs['courseid'], course.attrs['clazzid'], course.find_next('span', class_=\"course-name\").text)) print('课程列表: ', res)", "await self.get_sign_type(classid, courseid, activeid[0]) res.append((activeid[0], sign_type[0])) n = len(res) if", "in range(n): if not self.check_activeid(res[i][0]): d['class'][i] = { 'classid': classid,", "= 0 if len(all_img) == 0: return \"a5d588f7bce1994323c348982332e470\" else: img", "token } async with self.session.request( method='POST', url=url, params=param, data=files )", "data = json.load(f) cookies = data[self.username] except Exception: return False", "clientip, 'latitude': latitude, 'longitude': longitude, 'fid': '', 'appType': '15', 'ifTiJiao':", "await self.addr_sign(activeid) elif \"拍照\" in sign_type: return await self.tphoto_sign(activeid) else:", "check_cookies(self) -> Optional[SimpleCookie]: \"\"\"检测json文件内是否存有cookies,有则检测,无则登录\"\"\" if \"cookies.json\" not in os.listdir(COOKIES_PATH): with", "self.username = username self.password = password self.schoolid = '' if", "= h.xpath('//*[@id=\"startList\"]/div/div/@onclick') for activeid in activeid_list: activeid = re.findall(re_rule, activeid)", "not activeid: continue sign_type = await self.get_sign_type(classid, courseid, activeid[0]) res.append((activeid[0],", "text = await resp.text() title = re.findall('<title>(.*)</title>', text)[0] if \"签到成功\"", "'activeId': activeid } async with self.session.request( method='GET', url=\"https://mobilelearn.chaoxing.com/widget/sign/pcStuSignController/signIn\", params=params, verify_ssl=False", "time.strftime(\"%m-%d %H:%M\", time.localtime()), 'status': title } return s async def", "async def start_sign_task(self): \"\"\"开始所有签到任务\"\"\" tasks = [] res = []", "NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.100 Safari/537.36', }", "with self.session.request(method='GET', url='http://mooc1-1.chaoxing.com/api/workTestPendingNew', allow_redirects=False, cookies=cookies) as resp: if resp.status !=", "get_activeid(self, classid, courseid, classname): \"\"\"访问任务面板获取课程的活动id\"\"\" res = [] re_rule =", "enc=None): \"\"\"初始化就进行登录\"\"\" self.headers = { 'Accept-Encoding': 'gzip, deflate', 'Accept-Language': 'zh-CN,zh;q=0.9',", "activeid): \"\"\"位置签到\"\"\" params = { 'name': '', 'activeId': activeid, 'address':", "sign_type: return await self.hand_sign(classid, courseid, activeid) elif \"二维码\" in sign_type:", "\"\"\"获取课程主页中所有课程的classid和courseid\"\"\" res = [] async with self.session.request(method='GET', url='http://mooc1-2.chaoxing.com/visit/interaction') as resp:", "verify_ssl=False, params=params) as resp: text = await resp.text() h =", "text = await resp.text() res_dict = json.loads(text) return res_dict['objectId'] async", ") as resp: text = await resp.text() res_dict = json.loads(text)", "open(COOKIES_FILE_PATH, 'w+') as f: f.write(\"{}\") with open(COOKIES_FILE_PATH, 'r') as f:", "json.dump(data, f2) async def check_cookies(self) -> Optional[SimpleCookie]: \"\"\"检测json文件内是否存有cookies,有则检测,无则登录\"\"\" if \"cookies.json\"", "upload_img(self, uid): \"\"\"上传图片\"\"\" # 从图片文件夹内随机选择一张图片 try: all_img = os.listdir(IMAGE_PATH) except", "'appType': '15', 'ifTiJiao': '1', 'objectId': objectId } async with self.session.request(", "'appType': '15', 'ifTiJiao': '1' } async with self.session.request( method=\"GET\", url=\"https://mobilelearn.chaoxing.com/pptSign/stuSignajax\",", 
"tphoto_sign(self, activeid, uid): \"\"\"拍照签到\"\"\" objectId = await self.upload_img(uid) params =", "data[self.username] = cookies with open(COOKIES_FILE_PATH, 'w') as f2: json.dump(data, f2)", "json.loads(text) if data['result']: return 1000 # 登录成功 else: return 1001", "'1' } async with self.session.request( method=\"GET\", url=\"https://mobilelearn.chaoxing.com/pptSign/stuSignajax\", params=params ) as", "resp.text() title = re.findall('<title>(.*)</title>', text) s = { 'date': time.strftime(\"%m-%d", "else: print(\"cookie有效!\") return cookies async def login(self): \"\"\" 登录并返回响应 \"\"\"", "async with self.session.request( method='GET', url=\"https://mobilelearn.chaoxing.com/widget/sign/pcStuSignController/signIn\", params=params, verify_ssl=False ) as resp:", "'address': '中国', 'uid': '', 'clientip': clientip, 'latitude': latitude, 'longitude': longitude,", "async with self.session.request(method='GET', url='http://mooc1-1.chaoxing.com/api/workTestPendingNew', allow_redirects=False, cookies=cookies) as resp: if resp.status", "def check_login_status(self, status, text): if status == 403: return 1002", "resp.text() title = re.findall('<title>(.*)</title>', text)[0] if \"签到成功\" not in title:", "print(\"cookie有效!\") return cookies async def login(self): \"\"\" 登录并返回响应 \"\"\" params", "'activeId': activeid, 'uid': '', 'clientip': '', 'useragent': '', 'latitude': '-1',", "params=params) as resp: text = await resp.text() h = etree.HTML(text)", "aiohttp import ClientSession from aiohttp.cookiejar import SimpleCookie from lxml import", "= os.listdir(IMAGE_PATH) except Exception as e: os.mkdir(IMAGE_PATH) all_img = 0", "-*- import os import re import time import json import", "'_token': token } async with self.session.request( method='POST', url=url, params=param, data=files", "# 登录信息有误 async def set_cookies(self): \"\"\"设置cookies\"\"\" cookie = await self.check_cookies()", "-> dict: \"\"\" 从响应对象中抽取cookies \"\"\" result = {} for key,", "= re.findall('<title>(.*)</title>', text) s = { 'date': time.strftime(\"%m-%d %H:%M\", time.localtime()),", "sign_type = h.xpath('//div[@class=\"location\"]/span/text()') return sign_type async def get_activeid(self, classid, courseid,", "sign_type[0])) n = len(res) if n: d = {'num': n,", "如果出错,则表示没有此activeid return False def save_activeid(self, activeid): \"\"\"保存已成功签到的activeid\"\"\" activeid += self.username", "'li', class_=\"course\") for course in course_list: res.append((course.attrs['courseid'], course.attrs['clazzid'], course.find_next('span', class_=\"course-name\").text))", "if \"cookies.json\" not in os.listdir(COOKIES_PATH): with open(COOKIES_FILE_PATH, 'w+') as f:", "[] res = [] await self.set_cookies() # 获取所有课程的classid和course_id classid_courseId =", "self.session.request(method='GET', url='http://mooc1-2.chaoxing.com/visit/interaction') as resp: text = await resp.text() soup =", "method=\"GET\", url=\"https://mobilelearn.chaoxing.com/pptSign/stuSignajax\", params=params ) as resp: text = await resp.text()", "not in title: # 网页标题不含签到成功,则为拍照签到 return self.tphoto_sign(activeid) else: s =", "= { 'puid': uid, '_token': token } async with self.session.request(", "f: f.write(\"{}\") with open(ACTIVEID_FILE_PATH, 'r') as f: data = json.load(f)", "'', 'activeId': activeid, 'uid': '', 'clientip': '', 'useragent': '', 'latitude':", "self.session = ClientSession(headers=self.headers) self.username = username self.password = password self.schoolid", "dict_from_simple_cookie(self, cookies) -> dict: \"\"\" 从响应对象中抽取cookies \"\"\" result = {}", "} async with 
self.session.request( method='GET', url=\"https://mobilelearn.chaoxing.com/widget/sign/pcStuSignController/preSign\", params=params, verify_ssl=False ) as", "os.listdir(ACTIVEID_PATH): with open(ACTIVEID_FILE_PATH, 'w+') as f: f.write(\"{}\") with open(ACTIVEID_FILE_PATH, 'r')", "re.findall(re_rule, activeid) if not activeid: continue sign_type = await self.get_sign_type(classid,", "= await resp.text() title = re.findall('<title>(.*)</title>', text) s = {", "with self.session.request( method=\"GET\", url=\"https://mobilelearn.chaoxing.com/pptSign/stuSignajax\", params=params ) as resp: text =", "all_img = os.listdir(IMAGE_PATH) except Exception as e: os.mkdir(IMAGE_PATH) all_img =", "url=url ) as resp: text = await resp.text() token_dict =", "\"\"\"初始化就进行登录\"\"\" self.headers = { 'Accept-Encoding': 'gzip, deflate', 'Accept-Language': 'zh-CN,zh;q=0.9', 'Accept':", "as resp: text = await resp.text() h = etree.HTML(text) activeid_list", "server_chan_send(results, self.session) async def start_sign_task(self): \"\"\"开始所有签到任务\"\"\" tasks = [] res", "as f2: data[activeid] = True json.dump(data, f2) async def get_all_classid(self)", "'name': '', 'activeId': activeid, 'address': '中国', 'uid': '', 'clientip': clientip,", "get_all_classid(self) -> list: \"\"\"获取课程主页中所有课程的classid和courseid\"\"\" res = [] async with self.session.request(method='GET',", "async def tphoto_sign(self, activeid, uid): \"\"\"拍照签到\"\"\" objectId = await self.upload_img(uid)", "async with self.session.request( method='GET', url=url ) as resp: text =", "open(ACTIVEID_FILE_PATH, 'r') as f: data = json.load(f) with open(ACTIVEID_FILE_PATH, 'w')", "time.localtime()), 'status': title } return s async def hand_sign(self, classid,", "message import server_chan_send class AutoSign(object): def __init__(self, username, password, schoolid=None,", "courseid, 'jclassId': classid } async with self.session.request(method='GET', url=\"https://mobilelearn.chaoxing.com/widget/pcpick/stu/index\", verify_ssl=False, params=params)", "params=params) as resp: status = resp.status text = await resp.text()", "'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.100", "= { 'enc': self.enc, 'name': '', 'activeId': activeid, 'uid': '',", "resp.text() return { 'date': time.strftime(\"%m-%d %H:%M\", time.localtime()), 'status': text }", "await self.send_sign_request( d['classid'], d['courseid'], d['activeid'], d['sign_type'] ) if resp: #", "签到时间, 签到状态 sign_msg = { 'name': d['classname'], 'date': resp['date'], 'status':", "uid = self.session.cookies.get_dict()['UID'] url = 'https://pan-yz.chaoxing.com/upload' files = {'file': open(img,", "'activeId': activeid, 'classId': classid, 'fid': '39037', 'courseId': courseid } async", "text = await resp.text() token_dict = json.loads(text) return token_dict['_token'] async", "= {'num': n, 'class': {}} for i in range(n): if", "\"cookies.json\" not in os.listdir(COOKIES_PATH): with open(COOKIES_FILE_PATH, 'w+') as f: f.write(\"{}\")", "continue sign_type = await self.get_sign_type(classid, courseid, activeid[0]) res.append((activeid[0], sign_type[0])) n", "json import random import asyncio from typing import Optional, List,", "return False # 检测cookies是否有效 async with self.session.request(method='GET', url='http://mooc1-1.chaoxing.com/api/workTestPendingNew', allow_redirects=False, cookies=cookies)", "= await self.check_login_status(status, text) if login_status == 1000: cookies =", "soup = BeautifulSoup(text, \"lxml\") course_list = soup.find_all( 'li', class_=\"course\") for", 
"json.load(f) with open(ACTIVEID_FILE_PATH, 'w') as f2: data[activeid] = True json.dump(data,", "= await resp.text() token_dict = json.loads(text) return token_dict['_token'] async def", "self.get_token() param = { 'puid': uid, '_token': token } async", "if \"签到成功\" not in title: # 网页标题不含签到成功,则为拍照签到 return self.tphoto_sign(activeid) else:", "activeid, 'uid': '', 'clientip': '', 'useragent': '', 'latitude': '-1', 'longitude':", "'activeId': activeid, 'address': '中国', 'uid': '', 'clientip': clientip, 'latitude': latitude,", "'https://pan-yz.chaoxing.com/upload' files = {'file': open(img, 'rb')} uid = self.session.cookie_jar.filter_cookies('').get('UID').value token", "os import re import time import json import random import", "time import json import random import asyncio from typing import", "with self.session.request(method='GET', url=\"https://mobilelearn.chaoxing.com/widget/pcpick/stu/index\", verify_ssl=False, params=params) as resp: text = await", "time.localtime()), 'status': text } async def addr_sign(self, activeid): \"\"\"位置签到\"\"\" params", "= await self.upload_img(uid) params = { 'name': '', 'activeId': activeid,", "aiohttp.cookiejar import SimpleCookie from lxml import etree from bs4 import", "\"\"\" 从响应对象中抽取cookies \"\"\" result = {} for key, value in", "= self.session.cookies.get_dict()['UID'] url = 'https://pan-yz.chaoxing.com/upload' files = {'file': open(img, 'rb')}", "resp: text = await resp.text() return { 'date': time.strftime(\"%m-%d %H:%M\",", "send_sign_result(self, results: List[Dict]): \"\"\" 发送签到结果 \"\"\" await server_chan_send(results, self.session) async", "status == 403: return 1002 data = json.loads(text) if data['result']:", "in title: # 网页标题不含签到成功,则为拍照签到 return self.tphoto_sign(activeid) else: s = {", "etree.HTML(text) activeid_list = h.xpath('//*[@id=\"startList\"]/div/div/@onclick') for activeid in activeid_list: activeid =", "# 无效则重新登录,并保存cookies status, text, cookie = await self.login() login_status =", "'', 'appType': '15', 'ifTiJiao': '1', 'objectId': objectId } async with", "send_sign_request(self, classid, courseid, activeid, sign_type): \"\"\"发送签到请求\"\"\" if \"手势\" in sign_type:", "text = await resp.text() title = re.findall('<title>(.*)</title>', text) s =", "return 1001 # 登录信息有误 async def set_cookies(self): \"\"\"设置cookies\"\"\" cookie =", "resp: text = await resp.text() title = re.findall('<title>(.*)</title>', text)[0] if", "= json.load(f) with open(ACTIVEID_FILE_PATH, 'w') as f2: data[activeid] = True", "%H:%M\", time.localtime()), 'status': text } async def addr_sign(self, activeid): \"\"\"位置签到\"\"\"", "0: return \"a5d588f7bce1994323c348982332e470\" else: img = IMAGE_PATH + random.choice(all_img) #", "self.get_sign_type(classid, courseid, activeid[0]) res.append((activeid[0], sign_type[0])) n = len(res) if n:", "= json.load(f) if data[activeid]: return True except BaseException: # 如果出错,则表示没有此activeid", "'date': time.strftime(\"%m-%d %H:%M\", time.localtime()), 'status': title } return s async", "method='GET', url=url ) as resp: text = await resp.text() token_dict", "WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.100 Safari/537.36', } self.session =", "} async def addr_sign(self, activeid): \"\"\"位置签到\"\"\" params = { 'name':", "resp.cookies return status, text, cookies def check_activeid(self, activeid): \"\"\"检测activeid是否存在,不存在则添加\"\"\" activeid", "def get_sign_type(self, classid, courseid, activeid): \"\"\"获取签到类型\"\"\" params = { 'activeId':", "'verify': 0 } async with self.session.request(method='GET', 
url='https://passport2.chaoxing.com/api/login', params=params) as resp:", "text = await resp.text() h = etree.HTML(text) sign_type = h.xpath('//div[@class=\"location\"]/span/text()')", "username self.password = password self.schoolid = '' if schoolid is", "= re.findall('<title>(.*)</title>', text)[0] if \"签到成功\" not in title: # 网页标题不含签到成功,则为拍照签到", "[] async with self.session.request(method='GET', url='http://mooc1-2.chaoxing.com/visit/interaction') as resp: text = await", "'appType': '15' } async with self.session.request('GET', 'https://mobilelearn.chaoxing.com/pptSign/stuSignajax', params=params, allow_redirects=False) as", "'https://mobilelearn.chaoxing.com/pptSign/stuSignajax', params=params, allow_redirects=False) as resp: text = await resp.text() return", "'status': text } async def get_token(self): \"\"\"获取上传文件所需参数token\"\"\" url = 'https://pan-yz.chaoxing.com/api/token/uservalid'", "login(self): \"\"\" 登录并返回响应 \"\"\" params = { 'name': self.username, 'pwd':", "i in range(n): if not self.check_activeid(res[i][0]): d['class'][i] = { 'classid':", "from aiohttp.cookiejar import SimpleCookie from lxml import etree from bs4", "objectId } async with self.session.request( method=\"GET\", url=\"https://mobilelearn.chaoxing.com/pptSign/stuSignajax\", params=params ) as", "\"签到成功\" not in title: # 网页标题不含签到成功,则为拍照签到 return self.tphoto_sign(activeid) else: s", "s = { 'date': time.strftime(\"%m-%d %H:%M\", time.localtime()), 'status': title }", "results: List[Dict] = await asyncio.gather(*tasks) for r in results: if", "{ 'enc': self.enc, 'name': '', 'activeId': activeid, 'uid': '', 'clientip':", "'uid': '', 'clientip': '', 'useragent': '', 'latitude': '-1', 'longitude': '-1',", "continue for d in r['class'].values(): resp = await self.send_sign_request( d['classid'],", "classid, courseid, classname): \"\"\"访问任务面板获取课程的活动id\"\"\" res = [] re_rule = r'([\\d]+),2'", "'longitude': '-1', 'fid': '', 'appType': '15' } async with self.session.request('GET',", "return True except BaseException: # 如果出错,则表示没有此activeid return False def save_activeid(self,", "courseid, activeid): \"\"\"手势签到\"\"\" params = { 'courseId': courseid, 'classId': classid,", "'Accept-Encoding': 'gzip, deflate', 'Accept-Language': 'zh-CN,zh;q=0.9', 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9', 'User-Agent': 'Mozilla/5.0 (Windows", "classid } async with self.session.request(method='GET', url=\"https://mobilelearn.chaoxing.com/widget/pcpick/stu/index\", verify_ssl=False, params=params) as resp:", "text = await resp.text() return { 'date': time.strftime(\"%m-%d %H:%M\", time.localtime()),", "as f: data = json.load(f) with open(ACTIVEID_FILE_PATH, 'w') as f2:", "activeid += self.username if \"activeid.json\" not in os.listdir(ACTIVEID_PATH): with open(ACTIVEID_FILE_PATH,", "async with self.session.request(method='GET', url='http://mooc1-2.chaoxing.com/visit/interaction') as resp: text = await resp.text()", "activeid): \"\"\"获取签到类型\"\"\" params = { 'activeId': activeid, 'classId': classid, 'courseId':", "= username self.password = password self.schoolid = '' if schoolid", "resp.text() h = etree.HTML(text) activeid_list = h.xpath('//*[@id=\"startList\"]/div/div/@onclick') for activeid in", "\"\"\" await server_chan_send(results, self.session) async def start_sign_task(self): \"\"\"开始所有签到任务\"\"\" tasks =", "not self.check_activeid(res[i][0]): d['class'][i] = { 'classid': classid, 'courseid': courseid, 'activeid':", "'latitude': latitude, 'longitude': 
longitude, 'fid': '', 'appType': '15', 'ifTiJiao': '1',", "continue # 签到成功后,新增activeid self.save_activeid(d['activeid']) return res async def close_session(self): await", "def qcode_sign(self, activeid): \"\"\"二维码签到\"\"\" params = { 'enc': self.enc, 'name':", "as resp: text = await resp.text() soup = BeautifulSoup(text, \"lxml\")", "is None else enc async def check_login_status(self, status, text): if", "d async def general_sign(self, classid, courseid, activeid): \"\"\"普通签到\"\"\" params =", "+ random.choice(all_img) # uid = self.session.cookies.get_dict()['UID'] url = 'https://pan-yz.chaoxing.com/upload' files", "return result def save_cookies(self, cookies: dict): \"\"\"保存cookies\"\"\" with open(COOKIES_FILE_PATH, \"r\")", "%H:%M\", time.localtime()), 'status': text } async def get_token(self): \"\"\"获取上传文件所需参数token\"\"\" url", "resp['date'], 'status': resp['status'] } res.append(sign_msg) if '失败' in resp['status']: continue", "} return s async def hand_sign(self, classid, courseid, activeid): \"\"\"手势签到\"\"\"", "d['classname'], 'date': resp['date'], 'status': resp['status'] } res.append(sign_msg) if '失败' in", "self.session.request( method='POST', url=url, params=param, data=files ) as resp: text =", "sign_type): \"\"\"发送签到请求\"\"\" if \"手势\" in sign_type: return await self.hand_sign(classid, courseid,", "== 1000: cookies = self.dict_from_simple_cookie(cookie) self.save_cookies(cookies) else: return 1001 else:", "SimpleCookie from lxml import etree from bs4 import BeautifulSoup from", "'activeId': activeid, 'classId': classid, 'courseId': courseid } async with self.session.request(method='GET',", "result[key] = value.value return result def save_cookies(self, cookies: dict): \"\"\"保存cookies\"\"\"", "await resp.text() soup = BeautifulSoup(text, \"lxml\") course_list = soup.find_all( 'li',", "None else schoolid self.enc = '' if enc is None", "params=params, verify_ssl=False ) as resp: text = await resp.text() title", "BeautifulSoup from config import * from message import server_chan_send class", "\"\"\"检测activeid是否存在,不存在则添加\"\"\" activeid += self.username if \"activeid.json\" not in os.listdir(ACTIVEID_PATH): with", "range(n): if not self.check_activeid(res[i][0]): d['class'][i] = { 'classid': classid, 'courseid':", "return 1000 # 登录成功 else: return 1001 # 登录信息有误 async", "'-1', 'longitude': '-1', 'fid': '', 'appType': '15' } async with", "password self.schoolid = '' if schoolid is None else schoolid", "'15', 'ifTiJiao': '1', 'objectId': objectId } async with self.session.request( method=\"GET\",", "res.append((course.attrs['courseid'], course.attrs['clazzid'], course.find_next('span', class_=\"course-name\").text)) print('课程列表: ', res) return res async", "'puid': uid, '_token': token } async with self.session.request( method='POST', url=url,", "'status': title } return s async def hand_sign(self, classid, courseid,", "for i in classid_courseId: coroutine = self.get_activeid(i[1], i[0], i[2]) tasks.append(coroutine)", "async with self.session.request('GET', 'https://mobilelearn.chaoxing.com/pptSign/stuSignajax', params=params, allow_redirects=False) as resp: text =", "= password self.schoolid = '' if schoolid is None else", "{'file': open(img, 'rb')} uid = self.session.cookie_jar.filter_cookies('').get('UID').value token = await self.get_token()", "self.session.request(method='GET', url=\"https://mobilelearn.chaoxing.com/widget/pcpick/stu/index\", verify_ssl=False, params=params) as resp: text = await resp.text()", "else: self.session.cookie_jar.update_cookies(cookie) return 1000 def 
dict_from_simple_cookie(self, cookies) -> dict: \"\"\"", "} async def get_token(self): \"\"\"获取上传文件所需参数token\"\"\" url = 'https://pan-yz.chaoxing.com/api/token/uservalid' async with", "获取所有课程的classid和course_id classid_courseId = await self.get_all_classid() # 获取所有课程activeid和签到类型 for i in", "{ 'date': time.strftime(\"%m-%d %H:%M\", time.localtime()), 'status': title } return s", "import etree from bs4 import BeautifulSoup from config import *", "url=\"https://mobilelearn.chaoxing.com/widget/pcpick/stu/index\", verify_ssl=False, params=params) as resp: text = await resp.text() h", "params=params, allow_redirects=False) as resp: text = await resp.text() return {", "params=param, data=files ) as resp: text = await resp.text() res_dict", "= h.xpath('//div[@class=\"location\"]/span/text()') return sign_type async def get_activeid(self, classid, courseid, classname):", "async def hand_sign(self, classid, courseid, activeid): \"\"\"手势签到\"\"\" params = {", "for r in results: if r is None: continue for", "Dict from aiohttp import ClientSession from aiohttp.cookiejar import SimpleCookie from", "resp.text() cookies = resp.cookies return status, text, cookies def check_activeid(self,", "url=\"https://mobilelearn.chaoxing.com/pptSign/stuSignajax\", params=params ) as resp: text = await resp.text() return", "get_token(self): \"\"\"获取上传文件所需参数token\"\"\" url = 'https://pan-yz.chaoxing.com/api/token/uservalid' async with self.session.request( method='GET', url=url", "f2) async def check_cookies(self) -> Optional[SimpleCookie]: \"\"\"检测json文件内是否存有cookies,有则检测,无则登录\"\"\" if \"cookies.json\" not", "resp = await self.send_sign_request( d['classid'], d['courseid'], d['activeid'], d['sign_type'] ) if", "await self.login() login_status = await self.check_login_status(status, text) if login_status ==", "if '失败' in resp['status']: continue # 签到成功后,新增activeid self.save_activeid(d['activeid']) return res", "as resp: status = resp.status text = await resp.text() cookies", "with open(COOKIES_FILE_PATH, 'w+') as f: f.write(\"{}\") with open(COOKIES_FILE_PATH, 'r') as", "async def get_all_classid(self) -> list: \"\"\"获取课程主页中所有课程的classid和courseid\"\"\" res = [] async", "value.value return result def save_cookies(self, cookies: dict): \"\"\"保存cookies\"\"\" with open(COOKIES_FILE_PATH,", "f.write(\"{}\") with open(ACTIVEID_FILE_PATH, 'r') as f: try: # 读取文件 data", "'name': '', 'activeId': activeid, 'uid': '', 'clientip': '', 'useragent': '',", "with self.session.request(method='GET', url='https://passport2.chaoxing.com/api/login', params=params) as resp: status = resp.status text", "return await self.addr_sign(activeid) elif \"拍照\" in sign_type: return await self.tphoto_sign(activeid)", "f: data = json.load(f) data[self.username] = cookies with open(COOKIES_FILE_PATH, 'w')", "in sign_type: return await self.qcode_sign(activeid) elif \"位置\" in sign_type: return", "def get_token(self): \"\"\"获取上传文件所需参数token\"\"\" url = 'https://pan-yz.chaoxing.com/api/token/uservalid' async with self.session.request( method='GET',", "def get_activeid(self, classid, courseid, classname): \"\"\"访问任务面板获取课程的活动id\"\"\" res = [] re_rule", "cookies = self.dict_from_simple_cookie(cookie) self.save_cookies(cookies) else: return 1001 else: self.session.cookie_jar.update_cookies(cookie) return", "res = [] re_rule = r'([\\d]+),2' params = { 'courseId':", "d['courseid'], d['activeid'], d['sign_type'] ) if resp: # 签到课程, 签到时间, 签到状态", "as f2: json.dump(data, f2) async def check_cookies(self) -> Optional[SimpleCookie]: 
\"\"\"检测json文件内是否存有cookies,有则检测,无则登录\"\"\"", "import server_chan_send class AutoSign(object): def __init__(self, username, password, schoolid=None, enc=None):", "%H:%M\", time.localtime()), 'status': text } async def tphoto_sign(self, activeid, uid):", "self.username, 'pwd': <PASSWORD>, 'schoolid': self.schoolid, 'verify': 0 } async with", "'失败' in resp['status']: continue # 签到成功后,新增activeid self.save_activeid(d['activeid']) return res async", "= r'([\\d]+),2' params = { 'courseId': courseid, 'jclassId': classid }", "activeid, 'address': '中国', 'uid': '', 'clientip': clientip, 'latitude': latitude, 'longitude':", "activeid, sign_type): \"\"\"发送签到请求\"\"\" if \"手势\" in sign_type: return await self.hand_sign(classid,", "1000: cookies = self.dict_from_simple_cookie(cookie) self.save_cookies(cookies) else: return 1001 else: self.session.cookie_jar.update_cookies(cookie)", "if not cookie: # 无效则重新登录,并保存cookies status, text, cookie = await", "for i in range(n): if not self.check_activeid(res[i][0]): d['class'][i] = {", "'clientip': '', 'useragent': '', 'latitude': '-1', 'longitude': '-1', 'fid': '',", "# -*- coding: utf8 -*- import os import re import", "json文件有无账号cookies, 没有,则直接返回假 try: data = json.load(f) cookies = data[self.username] except", "value in cookies.items(): result[key] = value.value return result def save_cookies(self,", "'jclassId': classid } async with self.session.request(method='GET', url=\"https://mobilelearn.chaoxing.com/widget/pcpick/stu/index\", verify_ssl=False, params=params) as", "uid, '_token': token } async with self.session.request( method='POST', url=url, params=param,", "= await resp.text() h = etree.HTML(text) sign_type = h.xpath('//div[@class=\"location\"]/span/text()') return" ]
<filename>build/scripts-3.6/fit_background_model.py<gh_stars>0
#!python
import numpy as np
from numpy import inf
from numpy import nan
from scipy.optimize import fmin
from scipy.stats import beta
from scipy.special import beta as B
from scipy.special import comb
import argparse
import sys
import glob  # needed by run_fit_bgmodel() when no cons file is given


def parseArgs():
    '''Function for parsing arguments'''
    parser = argparse.ArgumentParser(
        description="Pipeline for analyzing barcoded amplicon "
                    "sequencing data with Unique molecular "
                    "identifiers (UMI)")
    parser.add_argument('-cons', '--cons_file', dest='cons_file',
                        help='Path to cons file, for fitting parameters '
                             'of the bgmodel')
    parser.add_argument('-nonbgposfile', '--non-background-positions',
                        dest='nonbgposfile',
                        help='Path to file with non-background positions')
    parser.add_argument('-out', '--out_file', dest='out_file',
                        help='Name of output file [default = %(default)s]',
                        default='bgmodel.params')
    parser.add_argument('-f', '--fsize', dest='fsize',
                        help='Family size cutoff (consensus cutoff) for '
                             'variant calling. [default = %(default)s]',
                        default=3)
    args = parser.parse_args(sys.argv[1:])
    return(args)


def parse_cons_file(filename, fsize=3):
    '''Collect allele fractions, coverages and alt counts (plus their
    positions and raw lines) for consensus families of the given size.'''
    n1 = []
    f1 = []
    c1 = []
    posx = []
    data = []
    with open(filename) as f:
        for line in f:
            if not line.startswith('Sample Name'):
                line = line.rstrip('\n')
                parts = line.split('\t')
                pos = parts[1] + ':' + parts[2]
                name = parts[3]
                if name not in "":
                    famsize = parts[-4]
                    if int(famsize) == fsize:
                        frac = float(parts[-2])
                        alt = parts[-1]
                        count = parts[-3]
                        if frac > 0 and alt not in 'N':
                            cov = int(parts[-5])
                            f1.append(float(frac))
                            n1.append(int(cov))
                            c1.append(int(count))
                            posx.append(pos)
                            data.append(line)
    return(f1, n1, c1, posx, data)
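# Worked illustration of the row layout parse_cons_file() expects (the column
# names here are guesses; only the indices are used by the code). Fields are
# taken from the right-hand end of each tab-separated line:
#
#   parts[1]:parts[2] -> position key, e.g. 'chr17:7577120'
#   parts[-5] -> coverage    parts[-4] -> family size
#   parts[-3] -> alt count   parts[-2] -> fraction    parts[-1] -> alt base
#
# so with fsize=3, a line ending in ...<tab>1000<tab>3<tab>4<tab>0.004<tab>A
# contributes 0.004 to f1, 1000 to n1 and 4 to c1.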
"g.write('{}\\n'.format(result[1])) #a[a==inf]=1e-10 #a[np.isnan(a)]=1e-10 #Q = -10*np.log10(a) #data=np.array(data) #plot_histogram(Q,args.output_path+'/'+args.sample_name+'.histogram.png') #if args.vc_method.lower()=='bbmodel':", "import beta from scipy.special import beta as B from scipy.special", "from scipy.special import comb import argparse import sys def parseArgs():", "data=[] with open(filename) as f: for line in f: if", "for line in f: line=line.rstrip() nonbgpos.append(line) else: nonbgpos=spikepositions if not", "default=3) args = parser.parse_args(sys.argv[1:]) return(args) def parse_cons_file(filename,fsize=3): n1=[] f1=[] c1=[]", "# rout=data[a1 >= float(args.count_cutoff)] # Qsig=Q[a1 >= float(args.count_cutoff)] #outfilename=args.output_path+'/'+args.sample_name+'2.vcf' #write_vcf(outfilename,rout,Qsig,args.reference_file)", "analyzing barcoded amplicon \\ sequencing data with Unique molecular \\", "not args.cons_file: args.cons_file=glob.glob(args.output_path+'/*cons.tsv')[0] args.fsize=int(args.fsize) f1,n1,a1,pos,data=parse_cons_file(args.cons_file,args.fsize) f1 = np.array(f1) n1 =", "else: nonbgpos=spikepositions if not args.cons_file: args.cons_file=glob.glob(args.output_path+'/*cons.tsv')[0] args.fsize=int(args.fsize) f1,n1,a1,pos,data=parse_cons_file(args.cons_file,args.fsize) f1 =", "\"\": famsize=parts[-4] if int(famsize)==fsize: frac=float(parts[-2]) alt=parts[-1] count=parts[-3] if frac >", "#print(famsize) return(f1,n1,c1,posx,data) def betaNLL(params,*args): a,b = params data = np.array(args[0])", "import numpy as np from numpy import inf from numpy", "frac=float(parts[-2]) alt=parts[-1] count=parts[-3] if frac > 0 and alt not", "numpy import inf from numpy import nan from scipy.optimize import", "file, for fitting parameters of the bgmodel') parser.add_argument('-nonbgposfile', '--non-background-positions', dest='nonbgposfile',", "np.array(n1) a1 = np.array(a1) pos = np.array(pos) data = np.array(data)", "parseArgs(): '''Function for parsing arguments''' parser = argparse.ArgumentParser(description=\"Pipeline for analyzing", "'--cons_file', dest='cons_file', help='Path to cons file, for fitting parameters of", "for line in f: if not line.startswith('Sample Name'): line=line.rstrip('\\n') parts=line.split('\\t')", "args.fsize=int(args.fsize) f1,n1,a1,pos,data=parse_cons_file(args.cons_file,args.fsize) f1 = np.array(f1) n1 = np.array(n1) a1 =", "n1=[] f1=[] c1=[] posx=[] data=[] with open(filename) as f: for", "#a[a==inf]=1e-10 #a[np.isnan(a)]=1e-10 #Q = -10*np.log10(a) #data=np.array(data) #plot_histogram(Q,args.output_path+'/'+args.sample_name+'.histogram.png') #if args.vc_method.lower()=='bbmodel': #", "= params data = np.array(args[0]) pdf=beta.pdf(data,a,b,loc=0,scale=1) lg=np.log(pdf) #lg=np.where(lg==-np.inf,0,lg) mask =", "#print(name) #print(famsize) return(f1,n1,c1,posx,data) def betaNLL(params,*args): a,b = params data =", "count=parts[-3] if frac > 0 and alt not in 'N':", "dest='cons_file', help='Path to cons file, for fitting parameters of the", "np.isfinite(lg) nll = -lg[mask].sum() nll=-1*np.sum(lg) return(nll) def get_beta_parameters(data): m=np.mean(data) v=np.var(data)", "np.array(pos) data = np.array(data) result=get_beta_parameters(f1[np.isin(pos,nonbgpos)!=True]) #a=prob_bb(n1,a1,result[0],result[1]) print(pos,nonbgpos,np.isin(pos,nonbgpos)) with open(args.out_file,'w') as", "frac > 0 and alt not in 'N': cov=int(parts[-5]) f1.append(float(frac))", "> 0 and alt not in 'N': cov=int(parts[-5]) f1.append(float(frac)) n1.append(int(cov))", 
"#plot_histogram(Q,args.output_path+'/'+args.sample_name+'.histogram.png') #if args.vc_method.lower()=='bbmodel': # rout=data[Q >= float(args.qvalue_threshold)] # Qsig=Q[Q >=", "cutoff (consensus cutoff) for variant calling. [default = %(default)s]', default=3)", "if frac > 0 and alt not in 'N': cov=int(parts[-5])", "positions') parser.add_argument('-out', '--out_file',dest='out_file',help=\"name of output file, default = %(default)s]\",default=\"bgmodel.params\") parser.add_argument('-f','--fsize',dest='fsize',", "help='Path to file with non-background positions') parser.add_argument('-out', '--out_file',dest='out_file',help=\"name of output", "%(default)s]\",default=\"bgmodel.params\") parser.add_argument('-f','--fsize',dest='fsize', help='Family size cutoff (consensus cutoff) for variant calling.", "from scipy.stats import beta from scipy.special import beta as B", "= np.array(args[0]) pdf=beta.pdf(data,a,b,loc=0,scale=1) lg=np.log(pdf) #lg=np.where(lg==-np.inf,0,lg) mask = np.isfinite(lg) nll =", "print(pos,nonbgpos,np.isin(pos,nonbgpos)) with open(args.out_file,'w') as g: g.write('{}\\n'.format(result[0])) g.write('{}\\n'.format(result[1])) #a[a==inf]=1e-10 #a[np.isnan(a)]=1e-10 #Q", ">= float(args.qvalue_threshold)] #else: # rout=data[a1 >= float(args.count_cutoff)] # Qsig=Q[a1 >=", "float(args.count_cutoff)] # Qsig=Q[a1 >= float(args.count_cutoff)] #outfilename=args.output_path+'/'+args.sample_name+'2.vcf' #write_vcf(outfilename,rout,Qsig,args.reference_file) if __name__=='__main__': args=parseArgs()", "lg=np.log(pdf) #lg=np.where(lg==-np.inf,0,lg) mask = np.isfinite(lg) nll = -lg[mask].sum() nll=-1*np.sum(lg) return(nll)", "#a=prob_bb(n1,a1,result[0],result[1]) print(pos,nonbgpos,np.isin(pos,nonbgpos)) with open(args.out_file,'w') as g: g.write('{}\\n'.format(result[0])) g.write('{}\\n'.format(result[1])) #a[a==inf]=1e-10 #a[np.isnan(a)]=1e-10", "of the bgmodel') parser.add_argument('-nonbgposfile', '--non-background-positions', dest='nonbgposfile', help='Path to file with", "'--out_file',dest='out_file',help=\"name of output file, default = %(default)s]\",default=\"bgmodel.params\") parser.add_argument('-f','--fsize',dest='fsize', help='Family size", "f: for line in f: line=line.rstrip() nonbgpos.append(line) else: nonbgpos=spikepositions if", "help='Path to cons file, for fitting parameters of the bgmodel')", "'--non-background-positions', dest='nonbgposfile', help='Path to file with non-background positions') parser.add_argument('-out', '--out_file',dest='out_file',help=\"name", "if not args.cons_file: args.cons_file=glob.glob(args.output_path+'/*cons.tsv')[0] args.fsize=int(args.fsize) f1,n1,a1,pos,data=parse_cons_file(args.cons_file,args.fsize) f1 = np.array(f1) n1", "c1=[] posx=[] data=[] with open(filename) as f: for line in", "= np.array(pos) data = np.array(data) result=get_beta_parameters(f1[np.isin(pos,nonbgpos)!=True]) #a=prob_bb(n1,a1,result[0],result[1]) print(pos,nonbgpos,np.isin(pos,nonbgpos)) with open(args.out_file,'w')", "parse_cons_file(filename,fsize=3): n1=[] f1=[] c1=[] posx=[] data=[] with open(filename) as f:", "parser = argparse.ArgumentParser(description=\"Pipeline for analyzing barcoded amplicon \\ sequencing data", "not in 'N': cov=int(parts[-5]) f1.append(float(frac)) n1.append(int(cov)) c1.append(int(count)) posx.append(pos) data.append(line) #print(name)", "a1 = np.array(a1) pos = np.array(pos) data = np.array(data) result=get_beta_parameters(f1[np.isin(pos,nonbgpos)!=True])", "beta as B from scipy.special import comb import argparse import", 
"args.nonbgposfile: nonbgpos=[] with open(args.nonbgposfile) as f: for line in f:", "as g: g.write('{}\\n'.format(result[0])) g.write('{}\\n'.format(result[1])) #a[a==inf]=1e-10 #a[np.isnan(a)]=1e-10 #Q = -10*np.log10(a) #data=np.array(data)", "g: g.write('{}\\n'.format(result[0])) g.write('{}\\n'.format(result[1])) #a[a==inf]=1e-10 #a[np.isnan(a)]=1e-10 #Q = -10*np.log10(a) #data=np.array(data) #plot_histogram(Q,args.output_path+'/'+args.sample_name+'.histogram.png')", "B from scipy.special import comb import argparse import sys def", "np.array(data) result=get_beta_parameters(f1[np.isin(pos,nonbgpos)!=True]) #a=prob_bb(n1,a1,result[0],result[1]) print(pos,nonbgpos,np.isin(pos,nonbgpos)) with open(args.out_file,'w') as g: g.write('{}\\n'.format(result[0])) g.write('{}\\n'.format(result[1]))", "fmin from scipy.stats import beta from scipy.special import beta as", "b0=(1-m)*(m * (1-m) / v-1 ) result=fmin(betaNLL,[a0,b0],args=(data,)) return(result) def run_fit_bgmodel(args):", "sequencing data with Unique molecular \\ identifiers (UMI)\") parser.add_argument('-cons', '--cons_file',", "if not line.startswith('Sample Name'): line=line.rstrip('\\n') parts=line.split('\\t') pos=parts[1]+':'+parts[2] name=parts[3] #print(name) if", "if int(famsize)==fsize: frac=float(parts[-2]) alt=parts[-1] count=parts[-3] if frac > 0 and", "to file with non-background positions') parser.add_argument('-out', '--out_file',dest='out_file',help=\"name of output file,", "from scipy.special import beta as B from scipy.special import comb", "with non-background positions') parser.add_argument('-out', '--out_file',dest='out_file',help=\"name of output file, default =", "as B from scipy.special import comb import argparse import sys", "as f: for line in f: if not line.startswith('Sample Name'):", "mask = np.isfinite(lg) nll = -lg[mask].sum() nll=-1*np.sum(lg) return(nll) def get_beta_parameters(data):", "v=np.var(data) a0=m*(m * (1-m) / v-1 ) b0=(1-m)*(m * (1-m)", "nonbgpos=[] with open(args.nonbgposfile) as f: for line in f: line=line.rstrip()", "int(famsize)==fsize: frac=float(parts[-2]) alt=parts[-1] count=parts[-3] if frac > 0 and alt", "* (1-m) / v-1 ) result=fmin(betaNLL,[a0,b0],args=(data,)) return(result) def run_fit_bgmodel(args): spikepositions=[178952085,55599321,7577558,7577547,7577538,7577120]", "#if args.vc_method.lower()=='bbmodel': # rout=data[Q >= float(args.qvalue_threshold)] # Qsig=Q[Q >= float(args.qvalue_threshold)]", "in f: line=line.rstrip() nonbgpos.append(line) else: nonbgpos=spikepositions if not args.cons_file: args.cons_file=glob.glob(args.output_path+'/*cons.tsv')[0]", "from numpy import nan from scipy.optimize import fmin from scipy.stats", "result=get_beta_parameters(f1[np.isin(pos,nonbgpos)!=True]) #a=prob_bb(n1,a1,result[0],result[1]) print(pos,nonbgpos,np.isin(pos,nonbgpos)) with open(args.out_file,'w') as g: g.write('{}\\n'.format(result[0])) g.write('{}\\n'.format(result[1])) #a[a==inf]=1e-10", "alt not in 'N': cov=int(parts[-5]) f1.append(float(frac)) n1.append(int(cov)) c1.append(int(count)) posx.append(pos) data.append(line)", "fitting parameters of the bgmodel') parser.add_argument('-nonbgposfile', '--non-background-positions', dest='nonbgposfile', help='Path to", "with open(args.nonbgposfile) as f: for line in f: line=line.rstrip() nonbgpos.append(line)", "parsing arguments''' parser = argparse.ArgumentParser(description=\"Pipeline for analyzing barcoded amplicon \\", "np.array(f1) n1 = np.array(n1) a1 = np.array(a1) pos = np.array(pos)", "get_beta_parameters(data): 
m=np.mean(data) v=np.var(data) a0=m*(m * (1-m) / v-1 ) b0=(1-m)*(m", "c1.append(int(count)) posx.append(pos) data.append(line) #print(name) #print(famsize) return(f1,n1,c1,posx,data) def betaNLL(params,*args): a,b =", "= -lg[mask].sum() nll=-1*np.sum(lg) return(nll) def get_beta_parameters(data): m=np.mean(data) v=np.var(data) a0=m*(m *", "pos=parts[1]+':'+parts[2] name=parts[3] #print(name) if name not in \"\": famsize=parts[-4] if", "#print(name) if name not in \"\": famsize=parts[-4] if int(famsize)==fsize: frac=float(parts[-2])", "posx=[] data=[] with open(filename) as f: for line in f:", "#a[np.isnan(a)]=1e-10 #Q = -10*np.log10(a) #data=np.array(data) #plot_histogram(Q,args.output_path+'/'+args.sample_name+'.histogram.png') #if args.vc_method.lower()=='bbmodel': # rout=data[Q", "# Qsig=Q[Q >= float(args.qvalue_threshold)] #else: # rout=data[a1 >= float(args.count_cutoff)] #" ]
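# ---------------------------------------------------------------------------
# Usage sketch (separate from the script above; all numbers are synthetic).
# It mirrors what get_beta_parameters() does: start from method-of-moments
# estimates, then refine by minimizing the Beta negative log-likelihood with
# scipy's fmin. The script itself would be run along the lines of
#   python fit_background_model.py -cons sample.cons.tsv -f 3 -out bgmodel.params
# ---------------------------------------------------------------------------
import numpy as np
from scipy.optimize import fmin
from scipy.stats import beta


def beta_nll(params, data):
    # negative log-likelihood, ignoring non-finite terms at the support edges
    a, b = params
    lg = beta.logpdf(data, a, b)
    return -lg[np.isfinite(lg)].sum()


rng = np.random.default_rng(0)
fractions = rng.beta(2.0, 50.0, size=500)   # fake background allele fractions

m, v = fractions.mean(), fractions.var()
a0 = m * (m * (1 - m) / v - 1)              # method-of-moments start values
b0 = (1 - m) * (m * (1 - m) / v - 1)
a_hat, b_hat = fmin(beta_nll, [a0, b0], args=(fractions,), disp=False)
print(a_hat, b_hat)                          # should land near (2, 50)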
# Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

import numpy as np
from caffe2.python import core, workspace
from caffe2.python.test_util import TestCase, rand_array


class TestPartitionOps(TestCase):
    def test_configs(self):
        # (main dims, partitions, main type, [list of (extra dims, type)])
        configs = [
            ((10, ), 3),
            ((4, ), 10),
            ((10, 10), 4),
            ((100, ), 2),
            ((5, ), 1),
            ((1, ), 1),
            ((2, 10), 2),
        ]
        suffixes = [
            [],
            [((2, 2), np.float32)],
            [((3, ), np.int64), ((2, ), np.float32)],
        ]
        return [
            (main_dims, parts, main_type, extra, pack)
            for main_dims, parts in configs
            for main_type in [np.int32, np.int64]
            for extra in suffixes
            for pack in [False, True]
        ]

    def testPartition(self):
        for main_dims, parts, main_type, extra_ins, pack in self.test_configs():
            ins = ['in' + str(i) for i in range(1 + len(extra_ins))]
            outs = [
                'in{}_p{}'.format(j, i)
                for i in range(parts) for j in range(1 + len(extra_ins))
            ]
            op = core.CreateOperator(
                'Partition', ins, outs, pack_first_input=(1 if pack else 0))
            x = []
            for i, (dims, t) in enumerate([((), main_type)] + extra_ins):
                if t in [np.float32, np.float64]:
                    d = rand_array(*(main_dims + dims))
                else:
                    d = np.random.randint(-100, 100, (main_dims + dims))
                d = d.astype(t)
                workspace.FeedBlob(ins[i], d)
                x.append(d)

            def sharding(x):
                # numpy has proper modulo op that yields non-negative results
                shards = (x[0] % parts).reshape([-1])
                out = []
                for i in range(parts):
                    for ind, v in enumerate(x):
                        suffix_shape = v.shape[len(x[0].shape):]
                        accum = []
                        data = v.reshape((-1, ) + suffix_shape)
                        if pack and ind == 0:
                            data = data // parts
                        for j, s in enumerate(shards):
                            if s == i:
                                accum.append(data[j])

                        def join(a):
                            if not a:
                                return np.empty(shape=(0, ) + suffix_shape)
                            return np.stack(a)

                        out.append(join(accum))
                return out

            workspace.RunOperatorOnce(op)
            ref = sharding(x)
            print(x)
            print(ref)
            for name, expected in zip(outs, ref):
                np.testing.assert_array_equal(
                    expected, workspace.FetchBlob(name)
                )

            # test inverse operation (GatherByKey)
            if len(main_dims) == 1:
                # currently only 1D key tensor supported
                for i in range(len(extra_ins)):
                    expected_out = ins[i + 1]
                    gather_ins = [ins[0]] + [
                        outs[len(ins) * p + i + 1] for p in range(parts)]
                    actual_out = expected_out + '_actual'
                    op = core.CreateOperator(
                        'GatherByKey', gather_ins, actual_out)
                    workspace.RunOperatorOnce(op)
                    expected = workspace.FetchBlob(expected_out)
                    actual = workspace.FetchBlob(actual_out)
                    np.testing.assert_array_equal(expected, actual)

    def testLengthsPartition(self):
        for main_dims, parts, main_type, extra_ins, pack in self.test_configs():
            # For LengthsSharding only 1-D tensors supported as a first input
            if len(main_dims) > 1:
                continue
            ins = ['in' + str(i) for i in range(2 + len(extra_ins))]
            outs = [
                'in{}_p{}'.format(j, i)
                for i in range(parts) for j in range(2 + len(extra_ins))
            ]
            op = core.CreateOperator(
                'LengthsPartition', ins, outs,
                pack_first_input=(1 if pack else 0)
            )
            x = []
            for i, (dims, t) in enumerate([((), main_type)] + extra_ins):
                if t in [np.float32, np.float64]:
                    d = rand_array(*(main_dims + dims))
                else:
                    d = np.random.randint(-100, 100, (main_dims + dims))
                d = d.astype(t)
                workspace.FeedBlob(ins[i + 1], d)
                x.append(d)

            # Randomly generate length tensor as well
            elements = np.random.randint(2, 10)
            lengths = []
            total_length = 0
            for _ in range(elements - 1):
                lengths.append(np.random.randint(main_dims[0] - total_length))
                total_length += lengths[-1]
            lengths.append(main_dims[0] - total_length)
            workspace.FeedBlob(ins[0], np.array(lengths, dtype=np.int32))

            def sharding(x):
                # numpy has proper modulo op that yields non-negative results
                shards = (x[0] % parts).reshape([-1])
                out = []
                for i in range(parts):
                    idx = 0
                    sharded_lengths = np.zeros(elements)
                    for ind, length in enumerate(lengths):
                        for _ in range(length):
                            if shards[idx] == i:
                                sharded_lengths[ind] += 1
                            idx += 1
                    out.append(sharded_lengths)

                    for ind, v in enumerate(x):
                        suffix_shape = v.shape[len(x[0].shape):]
                        accum = []
                        data = v.reshape((-1, ) + suffix_shape)
                        if pack and ind == 0:
                            data = data // parts
                        for j, s in enumerate(shards):
                            if s == i:
                                accum.append(data[j])

                        def join(a):
                            if not a:
                                return np.empty(shape=(0, ) + suffix_shape)
                            return np.stack(a)

                        out.append(join(accum))
                return out

            workspace.RunOperatorOnce(op)
            ref = sharding(x)
            for name, expected in zip(outs, ref):
                np.testing.assert_array_equal(
                    expected, workspace.FetchBlob(name)
                )


if __name__ == "__main__":
    import unittest
    unittest.main()
enumerate(x): suffix_shape = v.shape[len(x[0].shape):] accum", "Randomly generate length tensor as well elements = np.random.randint(2, 10)", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "idx = 0 sharded_lengths = np.zeros(elements) for ind, length in", "Version 2.0 (the \"License\"); # you may not use this", "join(a): if not a: return np.empty(shape=(0, ) + suffix_shape) return", "= ins[i + 1] gather_ins = [ins[0]] + [ outs[len(ins)", "parts for j, s in enumerate(shards): if s == i:", "unicode_literals import numpy as np from caffe2.python import core, workspace", "TestCase, rand_array class TestPartitionOps(TestCase): def test_configs(self): # (main dims, partitions,", "[ outs[len(ins) * p + i + 1] for p", "data // parts for j, s in enumerate(shards): if s", "= v.reshape((-1, ) + suffix_shape) if pack and ind ==", "__future__ import absolute_import from __future__ import division from __future__ import", "_ in range(length): if shards[idx] == i: sharded_lengths[ind] += 1", "implied. # See the License for the specific language governing", "workspace.FeedBlob(ins[0], np.array(lengths, dtype=np.int32)) def sharding(x): # numpy has proper modulo", "under the Apache License, Version 2.0 (the \"License\"); # you", "workspace.FetchBlob(name) ) # test inverse operation (GatherByKey) if len(main_dims) ==", "in self.test_configs(): ins = ['in' + str(i) for i in", "lengths[-1] lengths.append(main_dims[0] - total_length) workspace.FeedBlob(ins[0], np.array(lengths, dtype=np.int32)) def sharding(x): #", "test inverse operation (GatherByKey) if len(main_dims) == 1: # currently", "'in{}_p{}'.format(j, i) for i in range(parts) for j in range(2", "name, expected in zip(outs, ref): np.testing.assert_array_equal( expected, workspace.FetchBlob(name) ) #", "tensor supported for i in range(len(extra_ins)): expected_out = ins[i +", "for main_type in [np.int32, np.int64] for extra in suffixes for", "by applicable law or agreed to in writing, software #", "* p + i + 1] for p in range(parts)]", "[], [((2, 2), np.float32)], [((3, ), np.int64), ((2, ), np.float32)],", "] return [ (main_dims, parts, main_type, extra, pack) for main_dims,", "(extra dims, type)]) configs = [ ((10, ), 3), ((4,", "pack in [False, True] ] def testPartition(self): for main_dims, parts,", "sharding(x) for name, expected in zip(outs, ref): np.testing.assert_array_equal( expected, workspace.FetchBlob(name)", "d) x.append(d) def sharding(x): # numpy has proper modulo op", "s in enumerate(shards): if s == i: accum.append(data[j]) def join(a):", "for i in range(parts) for j in range(2 + len(extra_ins))", "np.stack(a) out.append(join(accum)) return out workspace.RunOperatorOnce(op) ref = sharding(x) for name,", "[ [], [((2, 2), np.float32)], [((3, ), np.int64), ((2, ),", "len(extra_ins))] outs = [ 'in{}_p{}'.format(j, i) for i in range(parts)", "in enumerate([((), main_type)] + extra_ins): if t in [np.float32, np.float64]:", "+ '_actual' op = core.CreateOperator( 'GatherByKey', gather_ins, actual_out) workspace.RunOperatorOnce(op) expected", "length in enumerate(lengths): for _ in range(length): if shards[idx] ==", "[ (main_dims, parts, main_type, extra, pack) for main_dims, parts in", "[((2, 2), np.float32)], [((3, ), np.int64), ((2, ), np.float32)], ]", "# currently only 1D key tensor supported for i in", "1] for p in range(parts)] actual_out = expected_out + '_actual'", "enumerate(shards): if s == i: accum.append(data[j]) def join(a): if not", "inverse operation (GatherByKey) if len(main_dims) == 1: # 
currently only", "in range(parts): idx = 0 sharded_lengths = np.zeros(elements) for ind,", "[False, True] ] def testPartition(self): for main_dims, parts, main_type, extra_ins,", "i, (dims, t) in enumerate([((), main_type)] + extra_ins): if t", "idx += 1 out.append(sharded_lengths) for ind, v in enumerate(x): suffix_shape", "dims)) else: d = np.random.randint(-100, 100, (main_dims + dims)) d", "), 2), ((5, ), 1), ((1, ), 1), ((2, 10),", "proper modulo op that yields non-negative results shards = (x[0]", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "d.astype(t) workspace.FeedBlob(ins[i + 1], d) x.append(d) # Randomly generate length", "Unless required by applicable law or agreed to in writing,", "+ dims)) else: d = np.random.randint(-100, 100, (main_dims + dims))", "def test_configs(self): # (main dims, partitions, main type, [list of", "accum.append(data[j]) def join(a): if not a: return np.empty(shape=(0, ) +", "= core.CreateOperator( 'Partition', ins, outs, pack_first_input=(1 if pack else 0))", "supported for i in range(len(extra_ins)): expected_out = ins[i + 1]", "ins, outs, pack_first_input=(1 if pack else 0)) x = []", "the specific language governing permissions and # limitations under the", "data = data // parts for j, s in enumerate(shards):", "for name, expected in zip(outs, ref): np.testing.assert_array_equal( expected, workspace.FetchBlob(name) )", "well elements = np.random.randint(2, 10) lengths = [] total_length =", "if t in [np.float32, np.float64]: d = rand_array(*(main_dims + dims))", "= core.CreateOperator( 'LengthsPartition', ins, outs, pack_first_input=(1 if pack else 0)", "range(parts): for ind, v in enumerate(x): suffix_shape = v.shape[len(x[0].shape):] accum", "applicable law or agreed to in writing, software # distributed", "if not a: return np.empty(shape=(0, ) + suffix_shape) return np.stack(a)", "i in range(2 + len(extra_ins))] outs = [ 'in{}_p{}'.format(j, i)", "# limitations under the License. ############################################################################## from __future__ import absolute_import", "t in [np.float32, np.float64]: d = rand_array(*(main_dims + dims)) else:", "permissions and # limitations under the License. 
############################################################################## from __future__", "d = np.random.randint(-100, 100, (main_dims + dims)) d = d.astype(t)", "from __future__ import unicode_literals import numpy as np from caffe2.python", "# numpy has proper modulo op that yields non-negative results", "data = v.reshape((-1, ) + suffix_shape) if pack and ind", "in range(2 + len(extra_ins))] outs = [ 'in{}_p{}'.format(j, i) for", "= [] for i, (dims, t) in enumerate([((), main_type)] +", "in writing, software # distributed under the License is distributed", "+ 1], d) x.append(d) # Randomly generate length tensor as", "first input if len(main_dims) > 1: continue ins = ['in'", "yields non-negative results shards = (x[0] % parts).reshape([-1]) out =", "for _ in range(elements - 1): lengths.append(np.random.randint(main_dims[0] - total_length)) total_length", "0) ) x = [] for i, (dims, t) in", "op = core.CreateOperator( 'LengthsPartition', ins, outs, pack_first_input=(1 if pack else", "[np.float32, np.float64]: d = rand_array(*(main_dims + dims)) else: d =", "= np.zeros(elements) for ind, length in enumerate(lengths): for _ in", "suffixes = [ [], [((2, 2), np.float32)], [((3, ), np.int64),", "[ins[0]] + [ outs[len(ins) * p + i + 1]", "v in enumerate(x): suffix_shape = v.shape[len(x[0].shape):] accum = [] data", "100, (main_dims + dims)) d = d.astype(t) workspace.FeedBlob(ins[i], d) x.append(d)", "suffix_shape) return np.stack(a) out.append(join(accum)) return out workspace.RunOperatorOnce(op) ref = sharding(x)", "parts, main_type, extra_ins, pack in self.test_configs(): ins = ['in' +", "if s == i: accum.append(data[j]) def join(a): if not a:", "suffix_shape = v.shape[len(x[0].shape):] accum = [] data = v.reshape((-1, )", "License is distributed on an \"AS IS\" BASIS, # WITHOUT", "if pack else 0) ) x = [] for i,", "License, Version 2.0 (the \"License\"); # you may not use", "outs = [ 'in{}_p{}'.format(j, i) for i in range(parts) for", "main_type, extra, pack) for main_dims, parts in configs for main_type", "main_type, extra_ins, pack in self.test_configs(): # For LengthsSharding only 1-D", "# You may obtain a copy of the License at", "import numpy as np from caffe2.python import core, workspace from", "2), np.float32)], [((3, ), np.int64), ((2, ), np.float32)], ] return", "only 1-D tensors supported as a first input if len(main_dims)", "expected_out = ins[i + 1] gather_ins = [ins[0]] + [", "[] for i, (dims, t) in enumerate([((), main_type)] + extra_ins):", "For LengthsSharding only 1-D tensors supported as a first input", "in range(2 + len(extra_ins)) ] op = core.CreateOperator( 'LengthsPartition', ins,", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "+ dims)) d = d.astype(t) workspace.FeedBlob(ins[i], d) x.append(d) def sharding(x):", "in range(1 + len(extra_ins)) ] op = core.CreateOperator( 'Partition', ins,", "in enumerate(shards): if s == i: accum.append(data[j]) def join(a): if", "if len(main_dims) > 1: continue ins = ['in' + str(i)", "np.random.randint(2, 10) lengths = [] total_length = 0 for _", "lengths.append(main_dims[0] - total_length) workspace.FeedBlob(ins[0], np.array(lengths, dtype=np.int32)) def sharding(x): # numpy", "the License for the specific language governing permissions and #", "testPartition(self): for main_dims, parts, main_type, extra_ins, pack in self.test_configs(): ins", "# (main dims, partitions, main type, [list of (extra dims,", "Apache License, Version 2.0 (the \"License\"); # you may not", "((2, ), np.float32)], ] return 
[ (main_dims, parts, main_type, extra,", "either express or implied. # See the License for the", "+ str(i) for i in range(1 + len(extra_ins))] outs =", "+ str(i) for i in range(2 + len(extra_ins))] outs =", "main_type)] + extra_ins): if t in [np.float32, np.float64]: d =", "range(2 + len(extra_ins))] outs = [ 'in{}_p{}'.format(j, i) for i", "1] gather_ins = [ins[0]] + [ outs[len(ins) * p +", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "workspace.RunOperatorOnce(op) ref = sharding(x) for name, expected in zip(outs, ref):", "[] total_length = 0 for _ in range(elements - 1):", "range(parts): idx = 0 sharded_lengths = np.zeros(elements) for ind, length", "str(i) for i in range(2 + len(extra_ins))] outs = [", "- 1): lengths.append(np.random.randint(main_dims[0] - total_length)) total_length += lengths[-1] lengths.append(main_dims[0] -", "= (x[0] % parts).reshape([-1]) out = [] for i in", "tensor as well elements = np.random.randint(2, 10) lengths = []", "= [] for i in range(parts): for ind, v in", "main_type in [np.int32, np.int64] for extra in suffixes for pack", "caffe2.python.test_util import TestCase, rand_array class TestPartitionOps(TestCase): def test_configs(self): # (main", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "for i in range(len(extra_ins)): expected_out = ins[i + 1] gather_ins", "ref): np.testing.assert_array_equal( expected, workspace.FetchBlob(name) ) # test inverse operation (GatherByKey)", "Copyright (c) 2016-present, Facebook, Inc. # # Licensed under the", "for i in range(2 + len(extra_ins))] outs = [ 'in{}_p{}'.format(j,", "rand_array(*(main_dims + dims)) else: d = np.random.randint(-100, 100, (main_dims +", "np.stack(a) out.append(join(accum)) return out workspace.RunOperatorOnce(op) ref = sharding(x) print(x) print(ref)", "+ [ outs[len(ins) * p + i + 1] for", "= sharding(x) print(x) print(ref) for name, expected in zip(outs, ref):", "shards = (x[0] % parts).reshape([-1]) out = [] for i", "non-negative results shards = (x[0] % parts).reshape([-1]) out = []", "np.testing.assert_array_equal(expected, actual) def testLengthsPartition(self): for main_dims, parts, main_type, extra_ins, pack", "'Partition', ins, outs, pack_first_input=(1 if pack else 0)) x =", "workspace.FetchBlob(expected_out) actual = workspace.FetchBlob(actual_out) np.testing.assert_array_equal(expected, actual) def testLengthsPartition(self): for main_dims,", "\"License\"); # you may not use this file except in", "[np.int32, np.int64] for extra in suffixes for pack in [False,", "extra in suffixes for pack in [False, True] ] def", "dims)) d = d.astype(t) workspace.FeedBlob(ins[i], d) x.append(d) def sharding(x): #", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "] suffixes = [ [], [((2, 2), np.float32)], [((3, ),", "ind, v in enumerate(x): suffix_shape = v.shape[len(x[0].shape):] accum = []", "extra_ins, pack in self.test_configs(): # For LengthsSharding only 1-D tensors", "# distributed under the License is distributed on an \"AS", "- total_length)) total_length += lengths[-1] lengths.append(main_dims[0] - total_length) workspace.FeedBlob(ins[0], np.array(lengths,", "# Unless required by applicable law or agreed to in", "d = d.astype(t) workspace.FeedBlob(ins[i], d) x.append(d) def sharding(x): # numpy", ") x = [] for i, (dims, t) in enumerate([((),", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "_ in range(elements - 1): lengths.append(np.random.randint(main_dims[0] - total_length)) total_length 
+=", "+ suffix_shape) return np.stack(a) out.append(join(accum)) return out workspace.RunOperatorOnce(op) ref =", "= np.random.randint(-100, 100, (main_dims + dims)) d = d.astype(t) workspace.FeedBlob(ins[i],", "return out workspace.RunOperatorOnce(op) ref = sharding(x) print(x) print(ref) for name,", "total_length)) total_length += lengths[-1] lengths.append(main_dims[0] - total_length) workspace.FeedBlob(ins[0], np.array(lengths, dtype=np.int32))", "workspace.RunOperatorOnce(op) expected = workspace.FetchBlob(expected_out) actual = workspace.FetchBlob(actual_out) np.testing.assert_array_equal(expected, actual) def", "for i in range(parts) for j in range(1 + len(extra_ins))", "You may obtain a copy of the License at #", "in range(len(extra_ins)): expected_out = ins[i + 1] gather_ins = [ins[0]]", "len(extra_ins)) ] op = core.CreateOperator( 'Partition', ins, outs, pack_first_input=(1 if", "caffe2.python import core, workspace from caffe2.python.test_util import TestCase, rand_array class", "((100, ), 2), ((5, ), 1), ((1, ), 1), ((2,", "__future__ import print_function from __future__ import unicode_literals import numpy as", "= [] for i in range(parts): idx = 0 sharded_lengths", "j, s in enumerate(shards): if s == i: accum.append(data[j]) def", "for main_dims, parts, main_type, extra_ins, pack in self.test_configs(): # For", "accum = [] data = v.reshape((-1, ) + suffix_shape) if", "((10, ), 3), ((4, ), 10), ((10, 10), 4), ((100,", "the Apache License, Version 2.0 (the \"License\"); # you may", "- total_length) workspace.FeedBlob(ins[0], np.array(lengths, dtype=np.int32)) def sharding(x): # numpy has", "key tensor supported for i in range(len(extra_ins)): expected_out = ins[i", "0)) x = [] for i, (dims, t) in enumerate([((),", "== 0: data = data // parts for j, s" ]
[ "'fib-common-cfg' _revision = '2017-05-01' def __init__(self): super(Fib, self).__init__() self._top_entity =", "super(Fib, self).__init__() self._top_entity = None self.yang_name = \"fib\" self.yang_parent_name =", "= 'fib-common-cfg' _revision = '2017-05-01' def __init__(self): super(Fib, self).__init__() self._top_entity", "Enum.YLeaf(2, \"any\") drop = Enum.YLeaf(3, \"drop\") class FibPbtsForwardClass(Enum): \"\"\" FibPbtsForwardClass", "self._leafs = OrderedDict([ ('forward_class_number', (YLeaf(YType.str, 'forward-class-number'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_fib_common_cfg', 'FibPbtsForwardClass', ''),'int'])), ('fallback_type',", "**type**\\: int **range:** 3..180 **units**\\: second \"\"\" _prefix = 'fib-common-cfg'", "False self.has_list_ancestor = False self.ylist_key_names = [] self._child_classes = OrderedDict([])", "= self self._children_name_map[\"platform\"] = \"platform\" self._segment_path = lambda: \"Cisco-IOS-XR-fib-common-cfg:fib\" self._is_frozen", "+ \"[forward-class-number='\" + str(self.forward_class_number) + \"']\" self._absolute_path = lambda: \"Cisco-IOS-XR-fib-common-cfg:fib/pbts-forward-class-fallbacks/%s\"", "the following management objects\\: fib\\: CEF configuration Copyright (c) 2013\\-2018", "self.ylist_key_names = ['forward_class_number'] self._child_classes = OrderedDict([]) self._leafs = OrderedDict([ ('forward_class_number',", "= False self.ylist_key_names = ['forward_class_number'] self._child_classes = OrderedDict([]) self._leafs =", "<ydk.models.cisco_ios_xr.Cisco_IOS_XR_fib_common_cfg.FibPbtsFallback>` **mandatory**\\: True .. attribute:: fallback_class_number_array Set PBTS fallback class", "frr_follow_bgp_pic Set option for fast\\-reroute to follow BGP PIC update,", "(\"pbts_forward_class_fallback\", Fib.PbtsForwardClassFallbacks.PbtsForwardClassFallback))]) self._leafs = OrderedDict() self.pbts_forward_class_fallback = YList(self) self._segment_path =", "PBTS class for fallback .. attribute:: forward_class_number (key) PBTS forward", "name, value) class Platform(Entity): \"\"\" FIB platform parameters .. attribute::", "self.yang_parent_name = \"pbts-forward-class-fallbacks\" self.is_top_level_class = False self.has_list_ancestor = False self.ylist_key_names", "OrderedDict from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf,", "IOS\\-XR fib\\-common package configuration. This module contains definitions for the", "\"\"\" _prefix = 'fib-common-cfg' _revision = '2017-05-01' def __init__(self): super(Fib.PbtsForwardClassFallbacks,", "import YFilter from ydk.errors import YError, YModelError from ydk.errors.error_handler import", "= Fib.Platform.LabelSwitchedMulticast() self.label_switched_multicast.parent = self self._children_name_map[\"label_switched_multicast\"] = \"label-switched-multicast\" self._segment_path =", "\"\"\" Options for label\\-switched\\-multicast parameters .. attribute:: frr_holdtime Set time", "a collection of YANG definitions for Cisco IOS\\-XR fib\\-common package", "Enum.YLeaf(3, \"drop\") class FibPbtsForwardClass(Enum): \"\"\" FibPbtsForwardClass (Enum Class) Fib pbts", "Fallback to class number list .. data:: any = 2", ".. attribute:: pbts_forward_class_fallbacks PBTS class configuration **type**\\: :py:class:`PbtsForwardClassFallbacks <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fib_common_cfg.Fib.PbtsForwardClassFallbacks>` ..", "class number list .. data:: any = 2 Fallback to", "by Cisco Systems, Inc. All rights reserved. 
\"\"\" from collections", "for timeout **type**\\: bool \"\"\" _prefix = 'fib-common-cfg' _revision =", "self.pbts_forward_class_fallback = YList(self) self._segment_path = lambda: \"pbts-forward-class-fallbacks\" self._absolute_path = lambda:", "self._perform_setattr(Fib.Platform.LabelSwitchedMulticast, ['frr_holdtime'], name, value) def clone_ptr(self): self._top_entity = Fib() return", "any = 8 Any class \"\"\" any = Enum.YLeaf(8, \"any\")", "True self.has_list_ancestor = False self.ylist_key_names = [] self._child_classes = OrderedDict([(\"pbts-forward-class-fallbacks\",", "= False self.ylist_key_names = [] self._child_classes = OrderedDict([(\"pbts-forward-class-fallbacks\", (\"pbts_forward_class_fallbacks\", Fib.PbtsForwardClassFallbacks)),", "= OrderedDict([ ('auto_hash_recover', (YLeaf(YType.boolean, 'auto-hash-recover'), ['bool'])), ('prefer_aib_routes', (YLeaf(YType.boolean, 'prefer-aib-routes'), ['bool'])),", "union of the below types: **type**\\: :py:class:`FibPbtsForwardClass <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fib_common_cfg.FibPbtsForwardClass>` **type**\\: int", "Systems, Inc. All rights reserved. \"\"\" from collections import OrderedDict", "list of int **range:** 0..7 \"\"\" _prefix = 'fib-common-cfg' _revision", ".. attribute:: auto_hash_recover Set option for automatcially recovering consistent\\-hashing state", "None self._segment_path = lambda: \"label-switched-multicast\" self._absolute_path = lambda: \"Cisco-IOS-XR-fib-common-cfg:fib/platform/%s\" %", "_prefix = 'fib-common-cfg' _revision = '2017-05-01' def __init__(self): super(Fib, self).__init__()", "**type**\\: union of the below types: **type**\\: :py:class:`FibPbtsForwardClass <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fib_common_cfg.FibPbtsForwardClass>` **type**\\:", ".. data:: any = 2 Fallback to any class ..", "\"\"\" PBTS class configuration .. attribute:: pbts_forward_class_fallback Set PBTS class", "adjacency routes overriding RIB routes **type**\\: bool .. attribute:: encap_sharing_disable", "'auto-hash-recover'), ['bool'])), ('prefer_aib_routes', (YLeaf(YType.boolean, 'prefer-aib-routes'), ['bool'])), ('encap_sharing_disable', (YLeaf(YType.boolean, 'encap-sharing-disable'), ['bool'])),", "self._segment_path = lambda: \"Cisco-IOS-XR-fib-common-cfg:fib\" self._is_frozen = True def __setattr__(self, name,", "= lambda: \"platform\" self._absolute_path = lambda: \"Cisco-IOS-XR-fib-common-cfg:fib/%s\" % self._segment_path() self._is_frozen", "sharing **type**\\: bool .. attribute:: frr_follow_bgp_pic Set option for fast\\-reroute", "int **range:** 0..7 \"\"\" _prefix = 'fib-common-cfg' _revision = '2017-05-01'", "import YError, YModelError from ydk.errors.error_handler import handle_type_error as _handle_type_error class", "for Cisco IOS\\-XR fib\\-common package configuration. This module contains definitions", "_revision = '2017-05-01' def __init__(self): super(Fib, self).__init__() self._top_entity = None", "class .. data:: drop = 3 Fallback to drop \"\"\"", "self._segment_path() self._is_frozen = True def __setattr__(self, name, value): self._perform_setattr(Fib.Platform.LabelSwitchedMulticast, ['frr_holdtime'],", "self._perform_setattr(Fib.PbtsForwardClassFallbacks.PbtsForwardClassFallback, ['forward_class_number', 'fallback_type', 'fallback_class_number_array'], name, value) class Platform(Entity): \"\"\" FIB", "encapsulation sharing **type**\\: bool .. 
attribute:: frr_follow_bgp_pic Set option for", "(\"platform\", (\"platform\", Fib.Platform))]) self._leafs = OrderedDict([ ('auto_hash_recover', (YLeaf(YType.boolean, 'auto-hash-recover'), ['bool'])),", "fallback class number array **type**\\: list of int **range:** 0..7", "\"\"\" FibPbtsForwardClass (Enum Class) Fib pbts forward class .. data::", "[], name, value) class LabelSwitchedMulticast(Entity): \"\"\" Options for label\\-switched\\-multicast parameters", "forward class .. data:: any = 8 Any class \"\"\"", "Fib.Platform() self.platform.parent = self self._children_name_map[\"platform\"] = \"platform\" self._segment_path = lambda:", "= None self._segment_path = lambda: \"label-switched-multicast\" self._absolute_path = lambda: \"Cisco-IOS-XR-fib-common-cfg:fib/platform/%s\"", "self).__init__() self._top_entity = None self.yang_name = \"fib\" self.yang_parent_name = \"Cisco-IOS-XR-fib-common-cfg\"", "auto_hash_recover Set option for automatcially recovering consistent\\-hashing state on interface", "any class .. data:: drop = 3 Fallback to drop", "= self self._children_name_map[\"pbts_forward_class_fallbacks\"] = \"pbts-forward-class-fallbacks\" self.platform = Fib.Platform() self.platform.parent =", "Enum.YLeaf(8, \"any\") class Fib(Entity): \"\"\" CEF configuration .. attribute:: pbts_forward_class_fallbacks", "YLeafList, YList, LeafDataList, Bits, Empty, Decimal64 from ydk.filters import YFilter", "self._children_name_map[\"pbts_forward_class_fallbacks\"] = \"pbts-forward-class-fallbacks\" self.platform = Fib.Platform() self.platform.parent = self self._children_name_map[\"platform\"]", "__setattr__(self, name, value): self._perform_setattr(Fib.PbtsForwardClassFallbacks.PbtsForwardClassFallback, ['forward_class_number', 'fallback_type', 'fallback_class_number_array'], name, value) class", "self.pbts_forward_class_fallbacks.parent = self self._children_name_map[\"pbts_forward_class_fallbacks\"] = \"pbts-forward-class-fallbacks\" self.platform = Fib.Platform() self.platform.parent", "False self.has_list_ancestor = False self.ylist_key_names = [] self._child_classes = OrderedDict([(\"pbts-forward-class-fallback\",", "(\"label_switched_multicast\", Fib.Platform.LabelSwitchedMulticast))]) self._leafs = OrderedDict() self.label_switched_multicast = Fib.Platform.LabelSwitchedMulticast() self.label_switched_multicast.parent =", "def __setattr__(self, name, value): self._perform_setattr(Fib.Platform.LabelSwitchedMulticast, ['frr_holdtime'], name, value) def clone_ptr(self):", "('prefer_aib_routes', (YLeaf(YType.boolean, 'prefer-aib-routes'), ['bool'])), ('encap_sharing_disable', (YLeaf(YType.boolean, 'encap-sharing-disable'), ['bool'])), ('frr_follow_bgp_pic', (YLeaf(YType.boolean,", "= True def __setattr__(self, name, value): self._perform_setattr(Fib.Platform.LabelSwitchedMulticast, ['frr_holdtime'], name, value)", "= [] self._child_classes = OrderedDict([(\"label-switched-multicast\", (\"label_switched_multicast\", Fib.Platform.LabelSwitchedMulticast))]) self._leafs = OrderedDict()", "encap_sharing_disable Set true to disable encapsulation sharing **type**\\: bool ..", "for label\\-switched\\-multicast parameters .. attribute:: frr_holdtime Set time to keep", "['int'])), ]) self.forward_class_number = None self.fallback_type = None self.fallback_class_number_array =", "Platform(Entity): \"\"\" FIB platform parameters .. 
attribute:: label_switched_multicast Options for", "\"\"\" from collections import OrderedDict from ydk.types import Entity, EntityPath,", "PIC update, not to wait for timeout **type**\\: bool \"\"\"", "Set time to keep FRR slots programmed post FRR **type**\\:", "slots programmed post FRR **type**\\: int **range:** 3..180 **units**\\: second", "= None self.pbts_forward_class_fallbacks = Fib.PbtsForwardClassFallbacks() self.pbts_forward_class_fallbacks.parent = self self._children_name_map[\"pbts_forward_class_fallbacks\"] =", "\"[forward-class-number='\" + str(self.forward_class_number) + \"']\" self._absolute_path = lambda: \"Cisco-IOS-XR-fib-common-cfg:fib/pbts-forward-class-fallbacks/%s\" %", "class for fallback .. attribute:: forward_class_number (key) PBTS forward class", "OrderedDict([ ('forward_class_number', (YLeaf(YType.str, 'forward-class-number'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_fib_common_cfg', 'FibPbtsForwardClass', ''),'int'])), ('fallback_type', (YLeaf(YType.enumeration, 'fallback-type'),", "<ydk.models.cisco_ios_xr.Cisco_IOS_XR_fib_common_cfg.Fib.Platform.LabelSwitchedMulticast>` \"\"\" _prefix = 'fib-common-cfg' _revision = '2017-05-01' def __init__(self):", "= \"label-switched-multicast\" self.yang_parent_name = \"platform\" self.is_top_level_class = False self.has_list_ancestor =", "self._child_classes = OrderedDict([]) self._leafs = OrderedDict([ ('frr_holdtime', (YLeaf(YType.uint32, 'frr-holdtime'), ['int'])),", ".. attribute:: label_switched_multicast Options for label\\-switched\\-multicast parameters **type**\\: :py:class:`LabelSwitchedMulticast <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fib_common_cfg.Fib.Platform.LabelSwitchedMulticast>`", "= 1 Fallback to class number list .. data:: any", "= False self.has_list_ancestor = False self.ylist_key_names = [] self._child_classes =", "self self._children_name_map[\"label_switched_multicast\"] = \"label-switched-multicast\" self._segment_path = lambda: \"platform\" self._absolute_path =", "**units**\\: second \"\"\" _prefix = 'fib-common-cfg' _revision = '2017-05-01' def", "Class) Fib pbts forward class .. data:: any = 8", "self.platform = Fib.Platform() self.platform.parent = self self._children_name_map[\"platform\"] = \"platform\" self._segment_path", "self._leafs = OrderedDict() self.pbts_forward_class_fallback = YList(self) self._segment_path = lambda: \"pbts-forward-class-fallbacks\"", "__init__(self): super(Fib.PbtsForwardClassFallbacks.PbtsForwardClassFallback, self).__init__() self.yang_name = \"pbts-forward-class-fallback\" self.yang_parent_name = \"pbts-forward-class-fallbacks\" self.is_top_level_class", "Fib pbts fallback .. data:: list = 1 Fallback to", "]) self.auto_hash_recover = None self.prefer_aib_routes = None self.encap_sharing_disable = None", "\"platform\" self._absolute_path = lambda: \"Cisco-IOS-XR-fib-common-cfg:fib/%s\" % self._segment_path() self._is_frozen = True", "FIB platform parameters **type**\\: :py:class:`Platform <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fib_common_cfg.Fib.Platform>` .. 
attribute:: auto_hash_recover Set", "self._is_frozen = True def __setattr__(self, name, value): self._perform_setattr(Fib.Platform, [], name,", "''),'int'])), ('fallback_type', (YLeaf(YType.enumeration, 'fallback-type'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_fib_common_cfg', 'FibPbtsFallback', '')])), ('fallback_class_number_array', (YLeafList(YType.uint32, 'fallback-class-number-array'),", "= True def __setattr__(self, name, value): self._perform_setattr(Fib.Platform, [], name, value)", "**type**\\: :py:class:`PbtsForwardClassFallbacks <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fib_common_cfg.Fib.PbtsForwardClassFallbacks>` .. attribute:: platform FIB platform parameters **type**\\:", ".. attribute:: encap_sharing_disable Set true to disable encapsulation sharing **type**\\:", "post FRR **type**\\: int **range:** 3..180 **units**\\: second \"\"\" _prefix", "This module contains definitions for the following management objects\\: fib\\:", "('fallback_class_number_array', (YLeafList(YType.uint32, 'fallback-class-number-array'), ['int'])), ]) self.forward_class_number = None self.fallback_type =", "def __setattr__(self, name, value): self._perform_setattr(Fib, ['auto_hash_recover', 'prefer_aib_routes', 'encap_sharing_disable', 'frr_follow_bgp_pic'], name,", "class for fallback **type**\\: list of :py:class:`PbtsForwardClassFallback <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fib_common_cfg.Fib.PbtsForwardClassFallbacks.PbtsForwardClassFallback>` \"\"\" _prefix", "['bool'])), ]) self.auto_hash_recover = None self.prefer_aib_routes = None self.encap_sharing_disable =", "'prefer-aib-routes'), ['bool'])), ('encap_sharing_disable', (YLeaf(YType.boolean, 'encap-sharing-disable'), ['bool'])), ('frr_follow_bgp_pic', (YLeaf(YType.boolean, 'frr-follow-bgp-pic'), ['bool'])),", "name, value): self._perform_setattr(Fib.PbtsForwardClassFallbacks, [], name, value) class PbtsForwardClassFallback(Entity): \"\"\" Set", "def __setattr__(self, name, value): self._perform_setattr(Fib.PbtsForwardClassFallbacks, [], name, value) class PbtsForwardClassFallback(Entity):", "disable encapsulation sharing **type**\\: bool .. attribute:: frr_follow_bgp_pic Set option", "= False self.ylist_key_names = [] self._child_classes = OrderedDict([]) self._leafs =", "= \"platform\" self.is_top_level_class = False self.has_list_ancestor = False self.ylist_key_names =", "True def __setattr__(self, name, value): self._perform_setattr(Fib.Platform.LabelSwitchedMulticast, ['frr_holdtime'], name, value) def", "YError, YModelError from ydk.errors.error_handler import handle_type_error as _handle_type_error class FibPbtsFallback(Enum):", "interface up **type**\\: bool .. attribute:: prefer_aib_routes Set options for", "Options for label\\-switched\\-multicast parameters **type**\\: :py:class:`LabelSwitchedMulticast <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fib_common_cfg.Fib.Platform.LabelSwitchedMulticast>` \"\"\" _prefix =", "\"label-switched-multicast\" self._segment_path = lambda: \"platform\" self._absolute_path = lambda: \"Cisco-IOS-XR-fib-common-cfg:fib/%s\" %", "class number **type**\\: union of the below types: **type**\\: :py:class:`FibPbtsForwardClass", "ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList,", "list = 1 Fallback to class number list .. data::", "fallback .. attribute:: forward_class_number (key) PBTS forward class number **type**\\:", "\"\"\" _prefix = 'fib-common-cfg' _revision = '2017-05-01' def __init__(self): super(Fib,", "class .. 
data:: any = 8 Any class \"\"\" any", "from ydk.errors.error_handler import handle_type_error as _handle_type_error class FibPbtsFallback(Enum): \"\"\" FibPbtsFallback", "= Fib.Platform() self.platform.parent = self self._children_name_map[\"platform\"] = \"platform\" self._segment_path =", "int **range:** 0..8 .. attribute:: fallback_type Set PBTS fallback type", "module contains definitions for the following management objects\\: fib\\: CEF", "value) class PbtsForwardClassFallback(Entity): \"\"\" Set PBTS class for fallback ..", "(c) 2013\\-2018 by Cisco Systems, Inc. All rights reserved. \"\"\"", "\"any\") drop = Enum.YLeaf(3, \"drop\") class FibPbtsForwardClass(Enum): \"\"\" FibPbtsForwardClass (Enum", "Set option for fast\\-reroute to follow BGP PIC update, not", "\"label-switched-multicast\" self.yang_parent_name = \"platform\" self.is_top_level_class = False self.has_list_ancestor = False", "None self.pbts_forward_class_fallbacks = Fib.PbtsForwardClassFallbacks() self.pbts_forward_class_fallbacks.parent = self self._children_name_map[\"pbts_forward_class_fallbacks\"] = \"pbts-forward-class-fallbacks\"", "super(Fib.Platform, self).__init__() self.yang_name = \"platform\" self.yang_parent_name = \"fib\" self.is_top_level_class =", "FRR slots programmed post FRR **type**\\: int **range:** 3..180 **units**\\:", "lambda: \"platform\" self._absolute_path = lambda: \"Cisco-IOS-XR-fib-common-cfg:fib/%s\" % self._segment_path() self._is_frozen =", "list = Enum.YLeaf(1, \"list\") any = Enum.YLeaf(2, \"any\") drop =", "OrderedDict([(\"pbts-forward-class-fallback\", (\"pbts_forward_class_fallback\", Fib.PbtsForwardClassFallbacks.PbtsForwardClassFallback))]) self._leafs = OrderedDict() self.pbts_forward_class_fallback = YList(self) self._segment_path", "OrderedDict([]) self._leafs = OrderedDict([ ('frr_holdtime', (YLeaf(YType.uint32, 'frr-holdtime'), ['int'])), ]) self.frr_holdtime", "Options for label\\-switched\\-multicast parameters .. attribute:: frr_holdtime Set time to", "parameters .. attribute:: frr_holdtime Set time to keep FRR slots", "__init__(self): super(Fib.Platform, self).__init__() self.yang_name = \"platform\" self.yang_parent_name = \"fib\" self.is_top_level_class", "= OrderedDict([(\"pbts-forward-class-fallback\", (\"pbts_forward_class_fallback\", Fib.PbtsForwardClassFallbacks.PbtsForwardClassFallback))]) self._leafs = OrderedDict() self.pbts_forward_class_fallback = YList(self)", ".. attribute:: fallback_class_number_array Set PBTS fallback class number array **type**\\:", "Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits,", "value) class PbtsForwardClassFallbacks(Entity): \"\"\" PBTS class configuration .. attribute:: pbts_forward_class_fallback", "of int **range:** 0..7 \"\"\" _prefix = 'fib-common-cfg' _revision =", "[] self._segment_path = lambda: \"pbts-forward-class-fallback\" + \"[forward-class-number='\" + str(self.forward_class_number) +", "\"\"\" FIB platform parameters .. attribute:: label_switched_multicast Options for label\\-switched\\-multicast", "self self._children_name_map[\"platform\"] = \"platform\" self._segment_path = lambda: \"Cisco-IOS-XR-fib-common-cfg:fib\" self._is_frozen =", "routes **type**\\: bool .. 
attribute:: encap_sharing_disable Set true to disable", "name, value) class PbtsForwardClassFallback(Entity): \"\"\" Set PBTS class for fallback", "contains definitions for the following management objects\\: fib\\: CEF configuration", "(YLeaf(YType.boolean, 'prefer-aib-routes'), ['bool'])), ('encap_sharing_disable', (YLeaf(YType.boolean, 'encap-sharing-disable'), ['bool'])), ('frr_follow_bgp_pic', (YLeaf(YType.boolean, 'frr-follow-bgp-pic'),", "data:: drop = 3 Fallback to drop \"\"\" list =", "drop = Enum.YLeaf(3, \"drop\") class FibPbtsForwardClass(Enum): \"\"\" FibPbtsForwardClass (Enum Class)", "PBTS class configuration .. attribute:: pbts_forward_class_fallback Set PBTS class for", "% self._segment_path() self._is_frozen = True def __setattr__(self, name, value): self._perform_setattr(Fib.Platform.LabelSwitchedMulticast,", "0..8 .. attribute:: fallback_type Set PBTS fallback type **type**\\: :py:class:`FibPbtsFallback", "class number array **type**\\: list of int **range:** 0..7 \"\"\"", "Class) Fib pbts fallback .. data:: list = 1 Fallback", "= 'fib-common-cfg' _revision = '2017-05-01' def __init__(self): super(Fib.Platform, self).__init__() self.yang_name", "_revision = '2017-05-01' def __init__(self): super(Fib.Platform, self).__init__() self.yang_name = \"platform\"", "self._segment_path() self._is_frozen = True def __setattr__(self, name, value): self._perform_setattr(Fib.PbtsForwardClassFallbacks.PbtsForwardClassFallback, ['forward_class_number',", "def __setattr__(self, name, value): self._perform_setattr(Fib.PbtsForwardClassFallbacks.PbtsForwardClassFallback, ['forward_class_number', 'fallback_type', 'fallback_class_number_array'], name, value)", "PBTS forward class number **type**\\: union of the below types:", "\"\"\" any = Enum.YLeaf(8, \"any\") class Fib(Entity): \"\"\" CEF configuration", "None self.encap_sharing_disable = None self.frr_follow_bgp_pic = None self.pbts_forward_class_fallbacks = Fib.PbtsForwardClassFallbacks()", "self.frr_holdtime = None self._segment_path = lambda: \"label-switched-multicast\" self._absolute_path = lambda:", "self._absolute_path = lambda: \"Cisco-IOS-XR-fib-common-cfg:fib/pbts-forward-class-fallbacks/%s\" % self._segment_path() self._is_frozen = True def", "Set PBTS fallback type **type**\\: :py:class:`FibPbtsFallback <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fib_common_cfg.FibPbtsFallback>` **mandatory**\\: True ..", "lambda: \"pbts-forward-class-fallback\" + \"[forward-class-number='\" + str(self.forward_class_number) + \"']\" self._absolute_path =", "name, value) class LabelSwitchedMulticast(Entity): \"\"\" Options for label\\-switched\\-multicast parameters ..", "self._segment_path() self._is_frozen = True def __setattr__(self, name, value): self._perform_setattr(Fib.PbtsForwardClassFallbacks, [],", "]) self.forward_class_number = None self.fallback_type = None self.fallback_class_number_array = []", "= 'fib-common-cfg' _revision = '2017-05-01' def __init__(self): super(Fib.PbtsForwardClassFallbacks.PbtsForwardClassFallback, self).__init__() self.yang_name", "False self.has_list_ancestor = False self.ylist_key_names = [] self._child_classes = OrderedDict([(\"label-switched-multicast\",", "('auto_hash_recover', (YLeaf(YType.boolean, 'auto-hash-recover'), ['bool'])), ('prefer_aib_routes', (YLeaf(YType.boolean, 'prefer-aib-routes'), ['bool'])), ('encap_sharing_disable', (YLeaf(YType.boolean,", "FIB platform parameters .. 
attribute:: label_switched_multicast Options for label\\-switched\\-multicast parameters", "attribute:: frr_follow_bgp_pic Set option for fast\\-reroute to follow BGP PIC", "FibPbtsForwardClass(Enum): \"\"\" FibPbtsForwardClass (Enum Class) Fib pbts forward class ..", "\"\"\" Set PBTS class for fallback .. attribute:: forward_class_number (key)", "'2017-05-01' def __init__(self): super(Fib, self).__init__() self._top_entity = None self.yang_name =", "\"pbts-forward-class-fallback\" + \"[forward-class-number='\" + str(self.forward_class_number) + \"']\" self._absolute_path = lambda:", "self).__init__() self.yang_name = \"label-switched-multicast\" self.yang_parent_name = \"platform\" self.is_top_level_class = False", "= '2017-05-01' def __init__(self): super(Fib.Platform, self).__init__() self.yang_name = \"platform\" self.yang_parent_name", "following management objects\\: fib\\: CEF configuration Copyright (c) 2013\\-2018 by", "= [] self._segment_path = lambda: \"pbts-forward-class-fallback\" + \"[forward-class-number='\" + str(self.forward_class_number)", "PBTS fallback type **type**\\: :py:class:`FibPbtsFallback <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fib_common_cfg.FibPbtsFallback>` **mandatory**\\: True .. attribute::", "class PbtsForwardClassFallback(Entity): \"\"\" Set PBTS class for fallback .. attribute::", "Inc. All rights reserved. \"\"\" from collections import OrderedDict from", "**range:** 0..8 .. attribute:: fallback_type Set PBTS fallback type **type**\\:", "Set PBTS fallback class number array **type**\\: list of int", ":py:class:`LabelSwitchedMulticast <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fib_common_cfg.Fib.Platform.LabelSwitchedMulticast>` \"\"\" _prefix = 'fib-common-cfg' _revision = '2017-05-01' def", "Set option for automatcially recovering consistent\\-hashing state on interface up", "class PbtsForwardClassFallbacks(Entity): \"\"\" PBTS class configuration .. attribute:: pbts_forward_class_fallback Set", "['int'])), ]) self.frr_holdtime = None self._segment_path = lambda: \"label-switched-multicast\" self._absolute_path", "true to disable encapsulation sharing **type**\\: bool .. attribute:: frr_follow_bgp_pic", "collection of YANG definitions for Cisco IOS\\-XR fib\\-common package configuration.", "pbts forward class .. data:: any = 8 Any class", "option for fast\\-reroute to follow BGP PIC update, not to", "\"fib\" self.yang_parent_name = \"Cisco-IOS-XR-fib-common-cfg\" self.is_top_level_class = True self.has_list_ancestor = False", "self.yang_parent_name = \"platform\" self.is_top_level_class = False self.has_list_ancestor = False self.ylist_key_names", "self._leafs = OrderedDict() self.label_switched_multicast = Fib.Platform.LabelSwitchedMulticast() self.label_switched_multicast.parent = self self._children_name_map[\"label_switched_multicast\"]", "\"\"\" Cisco_IOS_XR_fib_common_cfg This module contains a collection of YANG definitions", "fib\\-common package configuration. This module contains definitions for the following", "Enum.YLeaf(1, \"list\") any = Enum.YLeaf(2, \"any\") drop = Enum.YLeaf(3, \"drop\")", "self._child_classes = OrderedDict([]) self._leafs = OrderedDict([ ('forward_class_number', (YLeaf(YType.str, 'forward-class-number'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_fib_common_cfg',", "platform parameters **type**\\: :py:class:`Platform <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fib_common_cfg.Fib.Platform>` .. 
attribute:: auto_hash_recover Set option", "__init__(self): super(Fib, self).__init__() self._top_entity = None self.yang_name = \"fib\" self.yang_parent_name", "the below types: **type**\\: :py:class:`FibPbtsForwardClass <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fib_common_cfg.FibPbtsForwardClass>` **type**\\: int **range:** 0..8", "self.has_list_ancestor = False self.ylist_key_names = [] self._child_classes = OrderedDict([(\"label-switched-multicast\", (\"label_switched_multicast\",", "**type**\\: :py:class:`LabelSwitchedMulticast <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fib_common_cfg.Fib.Platform.LabelSwitchedMulticast>` \"\"\" _prefix = 'fib-common-cfg' _revision = '2017-05-01'", "'2017-05-01' def __init__(self): super(Fib.Platform.LabelSwitchedMulticast, self).__init__() self.yang_name = \"label-switched-multicast\" self.yang_parent_name =", "self._child_classes = OrderedDict([(\"pbts-forward-class-fallbacks\", (\"pbts_forward_class_fallbacks\", Fib.PbtsForwardClassFallbacks)), (\"platform\", (\"platform\", Fib.Platform))]) self._leafs =", "+ \"']\" self._absolute_path = lambda: \"Cisco-IOS-XR-fib-common-cfg:fib/pbts-forward-class-fallbacks/%s\" % self._segment_path() self._is_frozen =", ":py:class:`PbtsForwardClassFallback <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fib_common_cfg.Fib.PbtsForwardClassFallbacks.PbtsForwardClassFallback>` \"\"\" _prefix = 'fib-common-cfg' _revision = '2017-05-01' def", "self.ylist_key_names = [] self._child_classes = OrderedDict([(\"label-switched-multicast\", (\"label_switched_multicast\", Fib.Platform.LabelSwitchedMulticast))]) self._leafs =", "self._is_frozen = True def __setattr__(self, name, value): self._perform_setattr(Fib.Platform.LabelSwitchedMulticast, ['frr_holdtime'], name,", "Set PBTS class for fallback **type**\\: list of :py:class:`PbtsForwardClassFallback <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fib_common_cfg.Fib.PbtsForwardClassFallbacks.PbtsForwardClassFallback>`", "self.is_top_level_class = False self.has_list_ancestor = False self.ylist_key_names = [] self._child_classes", "= None self.fallback_class_number_array = [] self._segment_path = lambda: \"pbts-forward-class-fallback\" +", "OrderedDict() self.label_switched_multicast = Fib.Platform.LabelSwitchedMulticast() self.label_switched_multicast.parent = self self._children_name_map[\"label_switched_multicast\"] = \"label-switched-multicast\"", "self).__init__() self.yang_name = \"platform\" self.yang_parent_name = \"fib\" self.is_top_level_class = False", "types: **type**\\: :py:class:`FibPbtsForwardClass <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fib_common_cfg.FibPbtsForwardClass>` **type**\\: int **range:** 0..8 .. attribute::", "self).__init__() self.yang_name = \"pbts-forward-class-fallback\" self.yang_parent_name = \"pbts-forward-class-fallbacks\" self.is_top_level_class = False", "FibPbtsFallback (Enum Class) Fib pbts fallback .. data:: list =", "def __init__(self): super(Fib.PbtsForwardClassFallbacks, self).__init__() self.yang_name = \"pbts-forward-class-fallbacks\" self.yang_parent_name = \"fib\"", "configuration .. attribute:: pbts_forward_class_fallbacks PBTS class configuration **type**\\: :py:class:`PbtsForwardClassFallbacks <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fib_common_cfg.Fib.PbtsForwardClassFallbacks>`", "number list .. data:: any = 2 Fallback to any", "attribute:: platform FIB platform parameters **type**\\: :py:class:`Platform <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fib_common_cfg.Fib.Platform>` .. 
attribute::", "'frr-follow-bgp-pic'), ['bool'])), ]) self.auto_hash_recover = None self.prefer_aib_routes = None self.encap_sharing_disable", "class Fib(Entity): \"\"\" CEF configuration .. attribute:: pbts_forward_class_fallbacks PBTS class", "\"pbts-forward-class-fallbacks\" self._absolute_path = lambda: \"Cisco-IOS-XR-fib-common-cfg:fib/%s\" % self._segment_path() self._is_frozen = True", "= '2017-05-01' def __init__(self): super(Fib.PbtsForwardClassFallbacks.PbtsForwardClassFallback, self).__init__() self.yang_name = \"pbts-forward-class-fallback\" self.yang_parent_name", "self._is_frozen = True def __setattr__(self, name, value): self._perform_setattr(Fib, ['auto_hash_recover', 'prefer_aib_routes',", "self._child_classes = OrderedDict([(\"pbts-forward-class-fallback\", (\"pbts_forward_class_fallback\", Fib.PbtsForwardClassFallbacks.PbtsForwardClassFallback))]) self._leafs = OrderedDict() self.pbts_forward_class_fallback =", "for fallback .. attribute:: forward_class_number (key) PBTS forward class number", "pbts fallback .. data:: list = 1 Fallback to class", "fib\\: CEF configuration Copyright (c) 2013\\-2018 by Cisco Systems, Inc.", "value): self._perform_setattr(Fib.Platform.LabelSwitchedMulticast, ['frr_holdtime'], name, value) def clone_ptr(self): self._top_entity = Fib()", "('encap_sharing_disable', (YLeaf(YType.boolean, 'encap-sharing-disable'), ['bool'])), ('frr_follow_bgp_pic', (YLeaf(YType.boolean, 'frr-follow-bgp-pic'), ['bool'])), ]) self.auto_hash_recover", "OrderedDict([(\"pbts-forward-class-fallbacks\", (\"pbts_forward_class_fallbacks\", Fib.PbtsForwardClassFallbacks)), (\"platform\", (\"platform\", Fib.Platform))]) self._leafs = OrderedDict([ ('auto_hash_recover',", "attribute:: label_switched_multicast Options for label\\-switched\\-multicast parameters **type**\\: :py:class:`LabelSwitchedMulticast <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fib_common_cfg.Fib.Platform.LabelSwitchedMulticast>` \"\"\"", "'fallback-class-number-array'), ['int'])), ]) self.forward_class_number = None self.fallback_type = None self.fallback_class_number_array", "['forward_class_number', 'fallback_type', 'fallback_class_number_array'], name, value) class Platform(Entity): \"\"\" FIB platform", "fallback .. data:: list = 1 Fallback to class number", "'frr_follow_bgp_pic'], name, value) class PbtsForwardClassFallbacks(Entity): \"\"\" PBTS class configuration ..", "8 Any class \"\"\" any = Enum.YLeaf(8, \"any\") class Fib(Entity):", "<ydk.models.cisco_ios_xr.Cisco_IOS_XR_fib_common_cfg.Fib.Platform>` .. attribute:: auto_hash_recover Set option for automatcially recovering consistent\\-hashing", "All rights reserved. 
\"\"\" from collections import OrderedDict from ydk.types", "None self.prefer_aib_routes = None self.encap_sharing_disable = None self.frr_follow_bgp_pic = None", "label_switched_multicast Options for label\\-switched\\-multicast parameters **type**\\: :py:class:`LabelSwitchedMulticast <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fib_common_cfg.Fib.Platform.LabelSwitchedMulticast>` \"\"\" _prefix", "__setattr__(self, name, value): self._perform_setattr(Fib.Platform, [], name, value) class LabelSwitchedMulticast(Entity): \"\"\"", "EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty,", "Cisco_IOS_XR_fib_common_cfg This module contains a collection of YANG definitions for", "self._segment_path() self._is_frozen = True def __setattr__(self, name, value): self._perform_setattr(Fib.Platform, [],", "self._absolute_path = lambda: \"Cisco-IOS-XR-fib-common-cfg:fib/platform/%s\" % self._segment_path() self._is_frozen = True def", "Set options for adjacency routes overriding RIB routes **type**\\: bool", "self.platform.parent = self self._children_name_map[\"platform\"] = \"platform\" self._segment_path = lambda: \"Cisco-IOS-XR-fib-common-cfg:fib\"", "CEF configuration Copyright (c) 2013\\-2018 by Cisco Systems, Inc. All", "<ydk.models.cisco_ios_xr.Cisco_IOS_XR_fib_common_cfg.FibPbtsForwardClass>` **type**\\: int **range:** 0..8 .. attribute:: fallback_type Set PBTS", "FibPbtsFallback(Enum): \"\"\" FibPbtsFallback (Enum Class) Fib pbts fallback .. data::", "any = Enum.YLeaf(8, \"any\") class Fib(Entity): \"\"\" CEF configuration ..", "self.fallback_type = None self.fallback_class_number_array = [] self._segment_path = lambda: \"pbts-forward-class-fallback\"", "programmed post FRR **type**\\: int **range:** 3..180 **units**\\: second \"\"\"", "class LabelSwitchedMulticast(Entity): \"\"\" Options for label\\-switched\\-multicast parameters .. attribute:: frr_holdtime", "attribute:: fallback_type Set PBTS fallback type **type**\\: :py:class:`FibPbtsFallback <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fib_common_cfg.FibPbtsFallback>` **mandatory**\\:", "Copyright (c) 2013\\-2018 by Cisco Systems, Inc. All rights reserved.", "OrderedDict([ ('frr_holdtime', (YLeaf(YType.uint32, 'frr-holdtime'), ['int'])), ]) self.frr_holdtime = None self._segment_path", "= lambda: \"Cisco-IOS-XR-fib-common-cfg:fib/pbts-forward-class-fallbacks/%s\" % self._segment_path() self._is_frozen = True def __setattr__(self,", "Cisco IOS\\-XR fib\\-common package configuration. This module contains definitions for", ".. data:: drop = 3 Fallback to drop \"\"\" list", "= 'fib-common-cfg' _revision = '2017-05-01' def __init__(self): super(Fib.PbtsForwardClassFallbacks, self).__init__() self.yang_name", "self.yang_name = \"pbts-forward-class-fallback\" self.yang_parent_name = \"pbts-forward-class-fallbacks\" self.is_top_level_class = False self.has_list_ancestor", "keep FRR slots programmed post FRR **type**\\: int **range:** 3..180", "to disable encapsulation sharing **type**\\: bool .. 
attribute:: frr_follow_bgp_pic Set", "self.ylist_key_names = [] self._child_classes = OrderedDict([(\"pbts-forward-class-fallbacks\", (\"pbts_forward_class_fallbacks\", Fib.PbtsForwardClassFallbacks)), (\"platform\", (\"platform\",", "super(Fib.PbtsForwardClassFallbacks, self).__init__() self.yang_name = \"pbts-forward-class-fallbacks\" self.yang_parent_name = \"fib\" self.is_top_level_class =", "Fib.PbtsForwardClassFallbacks() self.pbts_forward_class_fallbacks.parent = self self._children_name_map[\"pbts_forward_class_fallbacks\"] = \"pbts-forward-class-fallbacks\" self.platform = Fib.Platform()", "**type**\\: list of :py:class:`PbtsForwardClassFallback <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fib_common_cfg.Fib.PbtsForwardClassFallbacks.PbtsForwardClassFallback>` \"\"\" _prefix = 'fib-common-cfg' _revision", "\"Cisco-IOS-XR-fib-common-cfg:fib\" self._is_frozen = True def __setattr__(self, name, value): self._perform_setattr(Fib, ['auto_hash_recover',", "self._segment_path = lambda: \"platform\" self._absolute_path = lambda: \"Cisco-IOS-XR-fib-common-cfg:fib/%s\" % self._segment_path()", "value) class Platform(Entity): \"\"\" FIB platform parameters .. attribute:: label_switched_multicast", "self._segment_path = lambda: \"pbts-forward-class-fallback\" + \"[forward-class-number='\" + str(self.forward_class_number) + \"']\"", "'fallback_class_number_array'], name, value) class Platform(Entity): \"\"\" FIB platform parameters ..", "YList(self) self._segment_path = lambda: \"pbts-forward-class-fallbacks\" self._absolute_path = lambda: \"Cisco-IOS-XR-fib-common-cfg:fib/%s\" %", "LeafDataList, Bits, Empty, Decimal64 from ydk.filters import YFilter from ydk.errors", "value): self._perform_setattr(Fib.Platform, [], name, value) class LabelSwitchedMulticast(Entity): \"\"\" Options for", "Fib.Platform))]) self._leafs = OrderedDict([ ('auto_hash_recover', (YLeaf(YType.boolean, 'auto-hash-recover'), ['bool'])), ('prefer_aib_routes', (YLeaf(YType.boolean,", "= \"pbts-forward-class-fallback\" self.yang_parent_name = \"pbts-forward-class-fallbacks\" self.is_top_level_class = False self.has_list_ancestor =", "= Fib.PbtsForwardClassFallbacks() self.pbts_forward_class_fallbacks.parent = self self._children_name_map[\"pbts_forward_class_fallbacks\"] = \"pbts-forward-class-fallbacks\" self.platform =", "'frr-holdtime'), ['int'])), ]) self.frr_holdtime = None self._segment_path = lambda: \"label-switched-multicast\"", "pbts_forward_class_fallbacks PBTS class configuration **type**\\: :py:class:`PbtsForwardClassFallbacks <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fib_common_cfg.Fib.PbtsForwardClassFallbacks>` .. attribute:: platform", "None self.yang_name = \"fib\" self.yang_parent_name = \"Cisco-IOS-XR-fib-common-cfg\" self.is_top_level_class = True", "(YLeaf(YType.boolean, 'encap-sharing-disable'), ['bool'])), ('frr_follow_bgp_pic', (YLeaf(YType.boolean, 'frr-follow-bgp-pic'), ['bool'])), ]) self.auto_hash_recover =", "None self.frr_follow_bgp_pic = None self.pbts_forward_class_fallbacks = Fib.PbtsForwardClassFallbacks() self.pbts_forward_class_fallbacks.parent = self", "self.forward_class_number = None self.fallback_type = None self.fallback_class_number_array = [] self._segment_path", "\"']\" self._absolute_path = lambda: \"Cisco-IOS-XR-fib-common-cfg:fib/pbts-forward-class-fallbacks/%s\" % self._segment_path() self._is_frozen = True", ".. 
attribute:: frr_holdtime Set time to keep FRR slots programmed", "attribute:: pbts_forward_class_fallbacks PBTS class configuration **type**\\: :py:class:`PbtsForwardClassFallbacks <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fib_common_cfg.Fib.PbtsForwardClassFallbacks>` .. attribute::", "Fallback to any class .. data:: drop = 3 Fallback", "self._is_frozen = True def __setattr__(self, name, value): self._perform_setattr(Fib.PbtsForwardClassFallbacks, [], name,", "= '2017-05-01' def __init__(self): super(Fib.PbtsForwardClassFallbacks, self).__init__() self.yang_name = \"pbts-forward-class-fallbacks\" self.yang_parent_name", "<ydk.models.cisco_ios_xr.Cisco_IOS_XR_fib_common_cfg.Fib.PbtsForwardClassFallbacks>` .. attribute:: platform FIB platform parameters **type**\\: :py:class:`Platform <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fib_common_cfg.Fib.Platform>`", "fast\\-reroute to follow BGP PIC update, not to wait for", "= \"fib\" self.yang_parent_name = \"Cisco-IOS-XR-fib-common-cfg\" self.is_top_level_class = True self.has_list_ancestor =", "self._perform_setattr(Fib.PbtsForwardClassFallbacks, [], name, value) class PbtsForwardClassFallback(Entity): \"\"\" Set PBTS class", "False self.has_list_ancestor = False self.ylist_key_names = ['forward_class_number'] self._child_classes = OrderedDict([])", "self).__init__() self.yang_name = \"pbts-forward-class-fallbacks\" self.yang_parent_name = \"fib\" self.is_top_level_class = False", "(YLeaf(YType.boolean, 'frr-follow-bgp-pic'), ['bool'])), ]) self.auto_hash_recover = None self.prefer_aib_routes = None", "Set PBTS class for fallback .. attribute:: forward_class_number (key) PBTS", "type **type**\\: :py:class:`FibPbtsFallback <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fib_common_cfg.FibPbtsFallback>` **mandatory**\\: True .. attribute:: fallback_class_number_array Set", "drop = 3 Fallback to drop \"\"\" list = Enum.YLeaf(1,", "True def __setattr__(self, name, value): self._perform_setattr(Fib, ['auto_hash_recover', 'prefer_aib_routes', 'encap_sharing_disable', 'frr_follow_bgp_pic'],", "ydk.errors.error_handler import handle_type_error as _handle_type_error class FibPbtsFallback(Enum): \"\"\" FibPbtsFallback (Enum", "PbtsForwardClassFallbacks(Entity): \"\"\" PBTS class configuration .. attribute:: pbts_forward_class_fallback Set PBTS", "= None self.fallback_type = None self.fallback_class_number_array = [] self._segment_path =", "from collections import OrderedDict from ydk.types import Entity, EntityPath, Identity,", "(\"platform\", Fib.Platform))]) self._leafs = OrderedDict([ ('auto_hash_recover', (YLeaf(YType.boolean, 'auto-hash-recover'), ['bool'])), ('prefer_aib_routes',", "= lambda: \"Cisco-IOS-XR-fib-common-cfg:fib/%s\" % self._segment_path() self._is_frozen = True def __setattr__(self,", "self.has_list_ancestor = False self.ylist_key_names = [] self._child_classes = OrderedDict([(\"pbts-forward-class-fallbacks\", (\"pbts_forward_class_fallbacks\",", "= lambda: \"Cisco-IOS-XR-fib-common-cfg:fib/platform/%s\" % self._segment_path() self._is_frozen = True def __setattr__(self,", "self._top_entity = None self.yang_name = \"fib\" self.yang_parent_name = \"Cisco-IOS-XR-fib-common-cfg\" self.is_top_level_class", "list .. 
data:: any = 2 Fallback to any class", "Fib.Platform.LabelSwitchedMulticast() self.label_switched_multicast.parent = self self._children_name_map[\"label_switched_multicast\"] = \"label-switched-multicast\" self._segment_path = lambda:", "False self.ylist_key_names = [] self._child_classes = OrderedDict([(\"pbts-forward-class-fallback\", (\"pbts_forward_class_fallback\", Fib.PbtsForwardClassFallbacks.PbtsForwardClassFallback))]) self._leafs", "attribute:: forward_class_number (key) PBTS forward class number **type**\\: union of", "self._segment_path = lambda: \"label-switched-multicast\" self._absolute_path = lambda: \"Cisco-IOS-XR-fib-common-cfg:fib/platform/%s\" % self._segment_path()", "= True def __setattr__(self, name, value): self._perform_setattr(Fib, ['auto_hash_recover', 'prefer_aib_routes', 'encap_sharing_disable',", "3..180 **units**\\: second \"\"\" _prefix = 'fib-common-cfg' _revision = '2017-05-01'", "for label\\-switched\\-multicast parameters **type**\\: :py:class:`LabelSwitchedMulticast <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fib_common_cfg.Fib.Platform.LabelSwitchedMulticast>` \"\"\" _prefix = 'fib-common-cfg'", "__setattr__(self, name, value): self._perform_setattr(Fib.Platform.LabelSwitchedMulticast, ['frr_holdtime'], name, value) def clone_ptr(self): self._top_entity", "CEF configuration .. attribute:: pbts_forward_class_fallbacks PBTS class configuration **type**\\: :py:class:`PbtsForwardClassFallbacks", "False self.ylist_key_names = [] self._child_classes = OrderedDict([(\"pbts-forward-class-fallbacks\", (\"pbts_forward_class_fallbacks\", Fib.PbtsForwardClassFallbacks)), (\"platform\",", "[], name, value) class PbtsForwardClassFallback(Entity): \"\"\" Set PBTS class for", "'FibPbtsForwardClass', ''),'int'])), ('fallback_type', (YLeaf(YType.enumeration, 'fallback-type'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_fib_common_cfg', 'FibPbtsFallback', '')])), ('fallback_class_number_array', (YLeafList(YType.uint32,", "\"Cisco-IOS-XR-fib-common-cfg:fib/%s\" % self._segment_path() self._is_frozen = True def __setattr__(self, name, value):", "parameters .. attribute:: label_switched_multicast Options for label\\-switched\\-multicast parameters **type**\\: :py:class:`LabelSwitchedMulticast", "= False self.has_list_ancestor = False self.ylist_key_names = ['forward_class_number'] self._child_classes =", "\"platform\" self.yang_parent_name = \"fib\" self.is_top_level_class = False self.has_list_ancestor = False", "parameters **type**\\: :py:class:`Platform <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fib_common_cfg.Fib.Platform>` .. attribute:: auto_hash_recover Set option for", "Decimal64 from ydk.filters import YFilter from ydk.errors import YError, YModelError", "value): self._perform_setattr(Fib, ['auto_hash_recover', 'prefer_aib_routes', 'encap_sharing_disable', 'frr_follow_bgp_pic'], name, value) class PbtsForwardClassFallbacks(Entity):", "overriding RIB routes **type**\\: bool .. attribute:: encap_sharing_disable Set true", "['frr_holdtime'], name, value) def clone_ptr(self): self._top_entity = Fib() return self._top_entity", "from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList,", "to class number list .. 
data:: any = 2 Fallback", "array **type**\\: list of int **range:** 0..7 \"\"\" _prefix =", "Bits, Empty, Decimal64 from ydk.filters import YFilter from ydk.errors import", "self.yang_name = \"platform\" self.yang_parent_name = \"fib\" self.is_top_level_class = False self.has_list_ancestor", "consistent\\-hashing state on interface up **type**\\: bool .. attribute:: prefer_aib_routes", "+ str(self.forward_class_number) + \"']\" self._absolute_path = lambda: \"Cisco-IOS-XR-fib-common-cfg:fib/pbts-forward-class-fallbacks/%s\" % self._segment_path()", "self.is_top_level_class = True self.has_list_ancestor = False self.ylist_key_names = [] self._child_classes", "Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64 from", "= False self.ylist_key_names = [] self._child_classes = OrderedDict([(\"label-switched-multicast\", (\"label_switched_multicast\", Fib.Platform.LabelSwitchedMulticast))])", "def __init__(self): super(Fib.Platform.LabelSwitchedMulticast, self).__init__() self.yang_name = \"label-switched-multicast\" self.yang_parent_name = \"platform\"", "module contains a collection of YANG definitions for Cisco IOS\\-XR", "PBTS fallback class number array **type**\\: list of int **range:**", "Cisco Systems, Inc. All rights reserved. \"\"\" from collections import", "attribute:: frr_holdtime Set time to keep FRR slots programmed post", "(key) PBTS forward class number **type**\\: union of the below", "self._is_frozen = True def __setattr__(self, name, value): self._perform_setattr(Fib.PbtsForwardClassFallbacks.PbtsForwardClassFallback, ['forward_class_number', 'fallback_type',", "to keep FRR slots programmed post FRR **type**\\: int **range:**", "**type**\\: bool .. attribute:: encap_sharing_disable Set true to disable encapsulation", "[] self._child_classes = OrderedDict([(\"pbts-forward-class-fallback\", (\"pbts_forward_class_fallback\", Fib.PbtsForwardClassFallbacks.PbtsForwardClassFallback))]) self._leafs = OrderedDict() self.pbts_forward_class_fallback", "rights reserved. \"\"\" from collections import OrderedDict from ydk.types import", ".. attribute:: platform FIB platform parameters **type**\\: :py:class:`Platform <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fib_common_cfg.Fib.Platform>` ..", "= lambda: \"pbts-forward-class-fallbacks\" self._absolute_path = lambda: \"Cisco-IOS-XR-fib-common-cfg:fib/%s\" % self._segment_path() self._is_frozen", "(YLeaf(YType.str, 'forward-class-number'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_fib_common_cfg', 'FibPbtsForwardClass', ''),'int'])), ('fallback_type', (YLeaf(YType.enumeration, 'fallback-type'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_fib_common_cfg', 'FibPbtsFallback',", "lambda: \"Cisco-IOS-XR-fib-common-cfg:fib/pbts-forward-class-fallbacks/%s\" % self._segment_path() self._is_frozen = True def __setattr__(self, name,", "self._segment_path = lambda: \"pbts-forward-class-fallbacks\" self._absolute_path = lambda: \"Cisco-IOS-XR-fib-common-cfg:fib/%s\" % self._segment_path()", "\"\"\" _prefix = 'fib-common-cfg' _revision = '2017-05-01' def __init__(self): super(Fib.Platform.LabelSwitchedMulticast,", "self.ylist_key_names = [] self._child_classes = OrderedDict([]) self._leafs = OrderedDict([ ('frr_holdtime',", "for adjacency routes overriding RIB routes **type**\\: bool .. 
attribute::", "attribute:: fallback_class_number_array Set PBTS fallback class number array **type**\\: list", "[] self._child_classes = OrderedDict([(\"label-switched-multicast\", (\"label_switched_multicast\", Fib.Platform.LabelSwitchedMulticast))]) self._leafs = OrderedDict() self.label_switched_multicast", "PBTS class configuration **type**\\: :py:class:`PbtsForwardClassFallbacks <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fib_common_cfg.Fib.PbtsForwardClassFallbacks>` .. attribute:: platform FIB", "Fallback to drop \"\"\" list = Enum.YLeaf(1, \"list\") any =", "name, value): self._perform_setattr(Fib.Platform.LabelSwitchedMulticast, ['frr_holdtime'], name, value) def clone_ptr(self): self._top_entity =", "__setattr__(self, name, value): self._perform_setattr(Fib.PbtsForwardClassFallbacks, [], name, value) class PbtsForwardClassFallback(Entity): \"\"\"", "configuration Copyright (c) 2013\\-2018 by Cisco Systems, Inc. All rights", "objects\\: fib\\: CEF configuration Copyright (c) 2013\\-2018 by Cisco Systems,", "of :py:class:`PbtsForwardClassFallback <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fib_common_cfg.Fib.PbtsForwardClassFallbacks.PbtsForwardClassFallback>` \"\"\" _prefix = 'fib-common-cfg' _revision = '2017-05-01'", "\"any\") class Fib(Entity): \"\"\" CEF configuration .. attribute:: pbts_forward_class_fallbacks PBTS", "__setattr__(self, name, value): self._perform_setattr(Fib, ['auto_hash_recover', 'prefer_aib_routes', 'encap_sharing_disable', 'frr_follow_bgp_pic'], name, value)", "for fallback **type**\\: list of :py:class:`PbtsForwardClassFallback <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fib_common_cfg.Fib.PbtsForwardClassFallbacks.PbtsForwardClassFallback>` \"\"\" _prefix =", "self.fallback_class_number_array = [] self._segment_path = lambda: \"pbts-forward-class-fallback\" + \"[forward-class-number='\" +", "[] self._child_classes = OrderedDict([(\"pbts-forward-class-fallbacks\", (\"pbts_forward_class_fallbacks\", Fib.PbtsForwardClassFallbacks)), (\"platform\", (\"platform\", Fib.Platform))]) self._leafs", "any = Enum.YLeaf(2, \"any\") drop = Enum.YLeaf(3, \"drop\") class FibPbtsForwardClass(Enum):", "class \"\"\" any = Enum.YLeaf(8, \"any\") class Fib(Entity): \"\"\" CEF", "of the below types: **type**\\: :py:class:`FibPbtsForwardClass <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fib_common_cfg.FibPbtsForwardClass>` **type**\\: int **range:**", "platform FIB platform parameters **type**\\: :py:class:`Platform <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fib_common_cfg.Fib.Platform>` .. attribute:: auto_hash_recover", "(\"pbts_forward_class_fallbacks\", Fib.PbtsForwardClassFallbacks)), (\"platform\", (\"platform\", Fib.Platform))]) self._leafs = OrderedDict([ ('auto_hash_recover', (YLeaf(YType.boolean,", "self.label_switched_multicast = Fib.Platform.LabelSwitchedMulticast() self.label_switched_multicast.parent = self self._children_name_map[\"label_switched_multicast\"] = \"label-switched-multicast\" self._segment_path", "parameters **type**\\: :py:class:`LabelSwitchedMulticast <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fib_common_cfg.Fib.Platform.LabelSwitchedMulticast>` \"\"\" _prefix = 'fib-common-cfg' _revision =", "label\\-switched\\-multicast parameters .. attribute:: frr_holdtime Set time to keep FRR", "second \"\"\" _prefix = 'fib-common-cfg' _revision = '2017-05-01' def __init__(self):", "reserved. 
\"\"\" from collections import OrderedDict from ydk.types import Entity,", "fallback **type**\\: list of :py:class:`PbtsForwardClassFallback <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fib_common_cfg.Fib.PbtsForwardClassFallbacks.PbtsForwardClassFallback>` \"\"\" _prefix = 'fib-common-cfg'", "(YLeafList(YType.uint32, 'fallback-class-number-array'), ['int'])), ]) self.forward_class_number = None self.fallback_type = None", "self._perform_setattr(Fib, ['auto_hash_recover', 'prefer_aib_routes', 'encap_sharing_disable', 'frr_follow_bgp_pic'], name, value) class PbtsForwardClassFallbacks(Entity): \"\"\"", "**type**\\: list of int **range:** 0..7 \"\"\" _prefix = 'fib-common-cfg'", "\"\"\" FibPbtsFallback (Enum Class) Fib pbts fallback .. data:: list", "% self._segment_path() self._is_frozen = True def __setattr__(self, name, value): self._perform_setattr(Fib.PbtsForwardClassFallbacks,", "definitions for the following management objects\\: fib\\: CEF configuration Copyright", "\"platform\" self.is_top_level_class = False self.has_list_ancestor = False self.ylist_key_names = []", "options for adjacency routes overriding RIB routes **type**\\: bool ..", "= \"platform\" self._segment_path = lambda: \"Cisco-IOS-XR-fib-common-cfg:fib\" self._is_frozen = True def", "False self.ylist_key_names = ['forward_class_number'] self._child_classes = OrderedDict([]) self._leafs = OrderedDict([", "'fib-common-cfg' _revision = '2017-05-01' def __init__(self): super(Fib.PbtsForwardClassFallbacks, self).__init__() self.yang_name =", "self.is_top_level_class = False self.has_list_ancestor = False self.ylist_key_names = ['forward_class_number'] self._child_classes", "['forward_class_number'] self._child_classes = OrderedDict([]) self._leafs = OrderedDict([ ('forward_class_number', (YLeaf(YType.str, 'forward-class-number'),", "int **range:** 3..180 **units**\\: second \"\"\" _prefix = 'fib-common-cfg' _revision", ":py:class:`Platform <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fib_common_cfg.Fib.Platform>` .. attribute:: auto_hash_recover Set option for automatcially recovering", "]) self.frr_holdtime = None self._segment_path = lambda: \"label-switched-multicast\" self._absolute_path =", "for fast\\-reroute to follow BGP PIC update, not to wait", "value): self._perform_setattr(Fib.PbtsForwardClassFallbacks, [], name, value) class PbtsForwardClassFallback(Entity): \"\"\" Set PBTS", "self._perform_setattr(Fib.Platform, [], name, value) class LabelSwitchedMulticast(Entity): \"\"\" Options for label\\-switched\\-multicast", "OrderedDict([ ('auto_hash_recover', (YLeaf(YType.boolean, 'auto-hash-recover'), ['bool'])), ('prefer_aib_routes', (YLeaf(YType.boolean, 'prefer-aib-routes'), ['bool'])), ('encap_sharing_disable',", "self.ylist_key_names = [] self._child_classes = OrderedDict([(\"pbts-forward-class-fallback\", (\"pbts_forward_class_fallback\", Fib.PbtsForwardClassFallbacks.PbtsForwardClassFallback))]) self._leafs =", "= lambda: \"label-switched-multicast\" self._absolute_path = lambda: \"Cisco-IOS-XR-fib-common-cfg:fib/platform/%s\" % self._segment_path() self._is_frozen", "(YLeaf(YType.enumeration, 'fallback-type'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_fib_common_cfg', 'FibPbtsFallback', '')])), ('fallback_class_number_array', (YLeafList(YType.uint32, 'fallback-class-number-array'), ['int'])), ])", "YANG definitions for Cisco IOS\\-XR fib\\-common package configuration. 
This module", "fallback type **type**\\: :py:class:`FibPbtsFallback <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fib_common_cfg.FibPbtsFallback>` **mandatory**\\: True .. attribute:: fallback_class_number_array", "'FibPbtsFallback', '')])), ('fallback_class_number_array', (YLeafList(YType.uint32, 'fallback-class-number-array'), ['int'])), ]) self.forward_class_number = None", "prefer_aib_routes Set options for adjacency routes overriding RIB routes **type**\\:", "= OrderedDict() self.pbts_forward_class_fallback = YList(self) self._segment_path = lambda: \"pbts-forward-class-fallbacks\" self._absolute_path", "to any class .. data:: drop = 3 Fallback to", "from ydk.filters import YFilter from ydk.errors import YError, YModelError from", ":py:class:`FibPbtsFallback <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fib_common_cfg.FibPbtsFallback>` **mandatory**\\: True .. attribute:: fallback_class_number_array Set PBTS fallback", "self._leafs = OrderedDict([ ('auto_hash_recover', (YLeaf(YType.boolean, 'auto-hash-recover'), ['bool'])), ('prefer_aib_routes', (YLeaf(YType.boolean, 'prefer-aib-routes'),", "0..7 \"\"\" _prefix = 'fib-common-cfg' _revision = '2017-05-01' def __init__(self):", "= Enum.YLeaf(2, \"any\") drop = Enum.YLeaf(3, \"drop\") class FibPbtsForwardClass(Enum): \"\"\"", "Empty, Decimal64 from ydk.filters import YFilter from ydk.errors import YError,", "number **type**\\: union of the below types: **type**\\: :py:class:`FibPbtsForwardClass <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fib_common_cfg.FibPbtsForwardClass>`", "self self._children_name_map[\"pbts_forward_class_fallbacks\"] = \"pbts-forward-class-fallbacks\" self.platform = Fib.Platform() self.platform.parent = self", "'encap_sharing_disable', 'frr_follow_bgp_pic'], name, value) class PbtsForwardClassFallbacks(Entity): \"\"\" PBTS class configuration", "fallback_type Set PBTS fallback type **type**\\: :py:class:`FibPbtsFallback <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fib_common_cfg.FibPbtsFallback>` **mandatory**\\: True", "drop \"\"\" list = Enum.YLeaf(1, \"list\") any = Enum.YLeaf(2, \"any\")", "Fib.Platform.LabelSwitchedMulticast))]) self._leafs = OrderedDict() self.label_switched_multicast = Fib.Platform.LabelSwitchedMulticast() self.label_switched_multicast.parent = self", "= None self.prefer_aib_routes = None self.encap_sharing_disable = None self.frr_follow_bgp_pic =", "= 2 Fallback to any class .. data:: drop =", "timeout **type**\\: bool \"\"\" _prefix = 'fib-common-cfg' _revision = '2017-05-01'", "fallback_class_number_array Set PBTS fallback class number array **type**\\: list of", "'fib-common-cfg' _revision = '2017-05-01' def __init__(self): super(Fib.Platform, self).__init__() self.yang_name =", "self.yang_parent_name = \"Cisco-IOS-XR-fib-common-cfg\" self.is_top_level_class = True self.has_list_ancestor = False self.ylist_key_names", "(Enum Class) Fib pbts forward class .. data:: any =", "**range:** 3..180 **units**\\: second \"\"\" _prefix = 'fib-common-cfg' _revision =", "bool .. 
attribute:: prefer_aib_routes Set options for adjacency routes overriding", "\"Cisco-IOS-XR-fib-common-cfg\" self.is_top_level_class = True self.has_list_ancestor = False self.ylist_key_names = []", "as _handle_type_error class FibPbtsFallback(Enum): \"\"\" FibPbtsFallback (Enum Class) Fib pbts", "= \"Cisco-IOS-XR-fib-common-cfg\" self.is_top_level_class = True self.has_list_ancestor = False self.ylist_key_names =", "lambda: \"Cisco-IOS-XR-fib-common-cfg:fib/%s\" % self._segment_path() self._is_frozen = True def __setattr__(self, name,", "number array **type**\\: list of int **range:** 0..7 \"\"\" _prefix", "data:: any = 8 Any class \"\"\" any = Enum.YLeaf(8,", "\"fib\" self.is_top_level_class = False self.has_list_ancestor = False self.ylist_key_names = []", "to drop \"\"\" list = Enum.YLeaf(1, \"list\") any = Enum.YLeaf(2,", "time to keep FRR slots programmed post FRR **type**\\: int", "[('ydk.models.cisco_ios_xr.Cisco_IOS_XR_fib_common_cfg', 'FibPbtsForwardClass', ''),'int'])), ('fallback_type', (YLeaf(YType.enumeration, 'fallback-type'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_fib_common_cfg', 'FibPbtsFallback', '')])), ('fallback_class_number_array',", "\"\"\" _prefix = 'fib-common-cfg' _revision = '2017-05-01' def __init__(self): super(Fib.Platform,", "lambda: \"Cisco-IOS-XR-fib-common-cfg:fib\" self._is_frozen = True def __setattr__(self, name, value): self._perform_setattr(Fib,", "data:: any = 2 Fallback to any class .. data::", "update, not to wait for timeout **type**\\: bool \"\"\" _prefix", "_revision = '2017-05-01' def __init__(self): super(Fib.PbtsForwardClassFallbacks.PbtsForwardClassFallback, self).__init__() self.yang_name = \"pbts-forward-class-fallback\"", "self.yang_name = \"label-switched-multicast\" self.yang_parent_name = \"platform\" self.is_top_level_class = False self.has_list_ancestor", "\"pbts-forward-class-fallback\" self.yang_parent_name = \"pbts-forward-class-fallbacks\" self.is_top_level_class = False self.has_list_ancestor = False", "OrderedDict([]) self._leafs = OrderedDict([ ('forward_class_number', (YLeaf(YType.str, 'forward-class-number'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_fib_common_cfg', 'FibPbtsForwardClass', ''),'int'])),", "below types: **type**\\: :py:class:`FibPbtsForwardClass <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fib_common_cfg.FibPbtsForwardClass>` **type**\\: int **range:** 0..8 ..", "pbts_forward_class_fallback Set PBTS class for fallback **type**\\: list of :py:class:`PbtsForwardClassFallback", "= self self._children_name_map[\"label_switched_multicast\"] = \"label-switched-multicast\" self._segment_path = lambda: \"platform\" self._absolute_path", "platform parameters .. attribute:: label_switched_multicast Options for label\\-switched\\-multicast parameters **type**\\:", "forward class number **type**\\: union of the below types: **type**\\:", "YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64 from ydk.filters", "'fallback_type', 'fallback_class_number_array'], name, value) class Platform(Entity): \"\"\" FIB platform parameters", "name, value) class PbtsForwardClassFallbacks(Entity): \"\"\" PBTS class configuration .. attribute::", "class Platform(Entity): \"\"\" FIB platform parameters .. attribute:: label_switched_multicast Options", "package configuration. 
This module contains definitions for the following management", "(YLeaf(YType.boolean, 'auto-hash-recover'), ['bool'])), ('prefer_aib_routes', (YLeaf(YType.boolean, 'prefer-aib-routes'), ['bool'])), ('encap_sharing_disable', (YLeaf(YType.boolean, 'encap-sharing-disable'),", "self.yang_name = \"fib\" self.yang_parent_name = \"Cisco-IOS-XR-fib-common-cfg\" self.is_top_level_class = True self.has_list_ancestor", "= \"pbts-forward-class-fallbacks\" self.platform = Fib.Platform() self.platform.parent = self self._children_name_map[\"platform\"] =", "collections import OrderedDict from ydk.types import Entity, EntityPath, Identity, Enum,", "\"\"\" CEF configuration .. attribute:: pbts_forward_class_fallbacks PBTS class configuration **type**\\:", "lambda: \"label-switched-multicast\" self._absolute_path = lambda: \"Cisco-IOS-XR-fib-common-cfg:fib/platform/%s\" % self._segment_path() self._is_frozen =", "ydk.filters import YFilter from ydk.errors import YError, YModelError from ydk.errors.error_handler", "'fib-common-cfg' _revision = '2017-05-01' def __init__(self): super(Fib.PbtsForwardClassFallbacks.PbtsForwardClassFallback, self).__init__() self.yang_name =", "**type**\\: int **range:** 0..8 .. attribute:: fallback_type Set PBTS fallback", "str(self.forward_class_number) + \"']\" self._absolute_path = lambda: \"Cisco-IOS-XR-fib-common-cfg:fib/pbts-forward-class-fallbacks/%s\" % self._segment_path() self._is_frozen", "= OrderedDict([(\"pbts-forward-class-fallbacks\", (\"pbts_forward_class_fallbacks\", Fib.PbtsForwardClassFallbacks)), (\"platform\", (\"platform\", Fib.Platform))]) self._leafs = OrderedDict([", "'fallback-type'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_fib_common_cfg', 'FibPbtsFallback', '')])), ('fallback_class_number_array', (YLeafList(YType.uint32, 'fallback-class-number-array'), ['int'])), ]) self.forward_class_number", "self.yang_name = \"pbts-forward-class-fallbacks\" self.yang_parent_name = \"fib\" self.is_top_level_class = False self.has_list_ancestor", "RIB routes **type**\\: bool .. 
attribute:: encap_sharing_disable Set true to", "frr_holdtime Set time to keep FRR slots programmed post FRR", "= \"pbts-forward-class-fallbacks\" self.yang_parent_name = \"fib\" self.is_top_level_class = False self.has_list_ancestor =", "('fallback_type', (YLeaf(YType.enumeration, 'fallback-type'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_fib_common_cfg', 'FibPbtsFallback', '')])), ('fallback_class_number_array', (YLeafList(YType.uint32, 'fallback-class-number-array'), ['int'])),", "= [] self._child_classes = OrderedDict([]) self._leafs = OrderedDict([ ('frr_holdtime', (YLeaf(YType.uint32,", "ydk.errors import YError, YModelError from ydk.errors.error_handler import handle_type_error as _handle_type_error", "\"Cisco-IOS-XR-fib-common-cfg:fib/pbts-forward-class-fallbacks/%s\" % self._segment_path() self._is_frozen = True def __setattr__(self, name, value):", "['bool'])), ('frr_follow_bgp_pic', (YLeaf(YType.boolean, 'frr-follow-bgp-pic'), ['bool'])), ]) self.auto_hash_recover = None self.prefer_aib_routes", "= OrderedDict([]) self._leafs = OrderedDict([ ('frr_holdtime', (YLeaf(YType.uint32, 'frr-holdtime'), ['int'])), ])", "['bool'])), ('prefer_aib_routes', (YLeaf(YType.boolean, 'prefer-aib-routes'), ['bool'])), ('encap_sharing_disable', (YLeaf(YType.boolean, 'encap-sharing-disable'), ['bool'])), ('frr_follow_bgp_pic',", "[('ydk.models.cisco_ios_xr.Cisco_IOS_XR_fib_common_cfg', 'FibPbtsFallback', '')])), ('fallback_class_number_array', (YLeafList(YType.uint32, 'fallback-class-number-array'), ['int'])), ]) self.forward_class_number =", "OrderedDict() self.pbts_forward_class_fallback = YList(self) self._segment_path = lambda: \"pbts-forward-class-fallbacks\" self._absolute_path =", "= \"label-switched-multicast\" self._segment_path = lambda: \"platform\" self._absolute_path = lambda: \"Cisco-IOS-XR-fib-common-cfg:fib/%s\"", "data:: list = 1 Fallback to class number list ..", "\"list\") any = Enum.YLeaf(2, \"any\") drop = Enum.YLeaf(3, \"drop\") class", "super(Fib.Platform.LabelSwitchedMulticast, self).__init__() self.yang_name = \"label-switched-multicast\" self.yang_parent_name = \"platform\" self.is_top_level_class =", "= lambda: \"Cisco-IOS-XR-fib-common-cfg:fib\" self._is_frozen = True def __setattr__(self, name, value):", "self.yang_parent_name = \"fib\" self.is_top_level_class = False self.has_list_ancestor = False self.ylist_key_names", "self.has_list_ancestor = False self.ylist_key_names = [] self._child_classes = OrderedDict([]) self._leafs", "_handle_type_error class FibPbtsFallback(Enum): \"\"\" FibPbtsFallback (Enum Class) Fib pbts fallback", "__init__(self): super(Fib.Platform.LabelSwitchedMulticast, self).__init__() self.yang_name = \"label-switched-multicast\" self.yang_parent_name = \"platform\" self.is_top_level_class", "= lambda: \"pbts-forward-class-fallback\" + \"[forward-class-number='\" + str(self.forward_class_number) + \"']\" self._absolute_path", "attribute:: prefer_aib_routes Set options for adjacency routes overriding RIB routes", "This module contains a collection of YANG definitions for Cisco", "for the following management objects\\: fib\\: CEF configuration Copyright (c)", "False self.ylist_key_names = [] self._child_classes = OrderedDict([(\"label-switched-multicast\", (\"label_switched_multicast\", Fib.Platform.LabelSwitchedMulticast))]) self._leafs", "None self.fallback_class_number_array = [] self._segment_path = lambda: \"pbts-forward-class-fallback\" + \"[forward-class-number='\"", "2 Fallback to any class .. 
data:: drop = 3", "lambda: \"Cisco-IOS-XR-fib-common-cfg:fib/platform/%s\" % self._segment_path() self._is_frozen = True def __setattr__(self, name,", "'encap-sharing-disable'), ['bool'])), ('frr_follow_bgp_pic', (YLeaf(YType.boolean, 'frr-follow-bgp-pic'), ['bool'])), ]) self.auto_hash_recover = None", "= 8 Any class \"\"\" any = Enum.YLeaf(8, \"any\") class", "attribute:: encap_sharing_disable Set true to disable encapsulation sharing **type**\\: bool", "= Enum.YLeaf(3, \"drop\") class FibPbtsForwardClass(Enum): \"\"\" FibPbtsForwardClass (Enum Class) Fib", "= OrderedDict() self.label_switched_multicast = Fib.Platform.LabelSwitchedMulticast() self.label_switched_multicast.parent = self self._children_name_map[\"label_switched_multicast\"] =", "\"\"\" _prefix = 'fib-common-cfg' _revision = '2017-05-01' def __init__(self): super(Fib.PbtsForwardClassFallbacks.PbtsForwardClassFallback,", "= '2017-05-01' def __init__(self): super(Fib, self).__init__() self._top_entity = None self.yang_name", "bool \"\"\" _prefix = 'fib-common-cfg' _revision = '2017-05-01' def __init__(self):", "definitions for Cisco IOS\\-XR fib\\-common package configuration. This module contains", "import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList,", "= None self.frr_follow_bgp_pic = None self.pbts_forward_class_fallbacks = Fib.PbtsForwardClassFallbacks() self.pbts_forward_class_fallbacks.parent =", "any = 2 Fallback to any class .. data:: drop", "configuration **type**\\: :py:class:`PbtsForwardClassFallbacks <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fib_common_cfg.Fib.PbtsForwardClassFallbacks>` .. attribute:: platform FIB platform parameters", "\"Cisco-IOS-XR-fib-common-cfg:fib/platform/%s\" % self._segment_path() self._is_frozen = True def __setattr__(self, name, value):", "attribute:: auto_hash_recover Set option for automatcially recovering consistent\\-hashing state on", "= None self.yang_name = \"fib\" self.yang_parent_name = \"Cisco-IOS-XR-fib-common-cfg\" self.is_top_level_class =", "class FibPbtsFallback(Enum): \"\"\" FibPbtsFallback (Enum Class) Fib pbts fallback ..", "forward_class_number (key) PBTS forward class number **type**\\: union of the", "self._absolute_path = lambda: \"Cisco-IOS-XR-fib-common-cfg:fib/%s\" % self._segment_path() self._is_frozen = True def", "_prefix = 'fib-common-cfg' _revision = '2017-05-01' def __init__(self): super(Fib.Platform.LabelSwitchedMulticast, self).__init__()", "YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64 from ydk.filters import", "self._children_name_map[\"platform\"] = \"platform\" self._segment_path = lambda: \"Cisco-IOS-XR-fib-common-cfg:fib\" self._is_frozen = True", "YModelError from ydk.errors.error_handler import handle_type_error as _handle_type_error class FibPbtsFallback(Enum): \"\"\"", "wait for timeout **type**\\: bool \"\"\" _prefix = 'fib-common-cfg' _revision", "routes overriding RIB routes **type**\\: bool .. attribute:: encap_sharing_disable Set", "on interface up **type**\\: bool .. 
attribute:: prefer_aib_routes Set options", "'fib-common-cfg' _revision = '2017-05-01' def __init__(self): super(Fib.Platform.LabelSwitchedMulticast, self).__init__() self.yang_name =", "not to wait for timeout **type**\\: bool \"\"\" _prefix =", "= True def __setattr__(self, name, value): self._perform_setattr(Fib.PbtsForwardClassFallbacks, [], name, value)", "self.auto_hash_recover = None self.prefer_aib_routes = None self.encap_sharing_disable = None self.frr_follow_bgp_pic", "Fib.PbtsForwardClassFallbacks)), (\"platform\", (\"platform\", Fib.Platform))]) self._leafs = OrderedDict([ ('auto_hash_recover', (YLeaf(YType.boolean, 'auto-hash-recover'),", "True def __setattr__(self, name, value): self._perform_setattr(Fib.PbtsForwardClassFallbacks, [], name, value) class", "self._children_name_map[\"label_switched_multicast\"] = \"label-switched-multicast\" self._segment_path = lambda: \"platform\" self._absolute_path = lambda:", "<ydk.models.cisco_ios_xr.Cisco_IOS_XR_fib_common_cfg.Fib.PbtsForwardClassFallbacks.PbtsForwardClassFallback>` \"\"\" _prefix = 'fib-common-cfg' _revision = '2017-05-01' def __init__(self):", "_prefix = 'fib-common-cfg' _revision = '2017-05-01' def __init__(self): super(Fib.Platform, self).__init__()", "\"label-switched-multicast\" self._absolute_path = lambda: \"Cisco-IOS-XR-fib-common-cfg:fib/platform/%s\" % self._segment_path() self._is_frozen = True", "bool .. attribute:: encap_sharing_disable Set true to disable encapsulation sharing", "class FibPbtsForwardClass(Enum): \"\"\" FibPbtsForwardClass (Enum Class) Fib pbts forward class", "super(Fib.PbtsForwardClassFallbacks.PbtsForwardClassFallback, self).__init__() self.yang_name = \"pbts-forward-class-fallback\" self.yang_parent_name = \"pbts-forward-class-fallbacks\" self.is_top_level_class =", "option for automatcially recovering consistent\\-hashing state on interface up **type**\\:", "_prefix = 'fib-common-cfg' _revision = '2017-05-01' def __init__(self): super(Fib.PbtsForwardClassFallbacks.PbtsForwardClassFallback, self).__init__()", "'')])), ('fallback_class_number_array', (YLeafList(YType.uint32, 'fallback-class-number-array'), ['int'])), ]) self.forward_class_number = None self.fallback_type", "\"pbts-forward-class-fallbacks\" self.yang_parent_name = \"fib\" self.is_top_level_class = False self.has_list_ancestor = False", "follow BGP PIC update, not to wait for timeout **type**\\:", "def __init__(self): super(Fib.Platform, self).__init__() self.yang_name = \"platform\" self.yang_parent_name = \"fib\"", "__init__(self): super(Fib.PbtsForwardClassFallbacks, self).__init__() self.yang_name = \"pbts-forward-class-fallbacks\" self.yang_parent_name = \"fib\" self.is_top_level_class", "= OrderedDict([ ('frr_holdtime', (YLeaf(YType.uint32, 'frr-holdtime'), ['int'])), ]) self.frr_holdtime = None", "class configuration **type**\\: :py:class:`PbtsForwardClassFallbacks <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fib_common_cfg.Fib.PbtsForwardClassFallbacks>` .. attribute:: platform FIB platform", "of YANG definitions for Cisco IOS\\-XR fib\\-common package configuration. This", "Fib pbts forward class .. data:: any = 8 Any", "= True def __setattr__(self, name, value): self._perform_setattr(Fib.PbtsForwardClassFallbacks.PbtsForwardClassFallback, ['forward_class_number', 'fallback_type', 'fallback_class_number_array'],", "2013\\-2018 by Cisco Systems, Inc. All rights reserved. \"\"\" from", "Set true to disable encapsulation sharing **type**\\: bool .. 
attribute::", "BGP PIC update, not to wait for timeout **type**\\: bool", "3 Fallback to drop \"\"\" list = Enum.YLeaf(1, \"list\") any", "configuration .. attribute:: pbts_forward_class_fallback Set PBTS class for fallback **type**\\:", "self.prefer_aib_routes = None self.encap_sharing_disable = None self.frr_follow_bgp_pic = None self.pbts_forward_class_fallbacks", "self.has_list_ancestor = False self.ylist_key_names = [] self._child_classes = OrderedDict([(\"pbts-forward-class-fallback\", (\"pbts_forward_class_fallback\",", ":py:class:`FibPbtsForwardClass <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fib_common_cfg.FibPbtsForwardClass>` **type**\\: int **range:** 0..8 .. attribute:: fallback_type Set", "def __init__(self): super(Fib.PbtsForwardClassFallbacks.PbtsForwardClassFallback, self).__init__() self.yang_name = \"pbts-forward-class-fallback\" self.yang_parent_name = \"pbts-forward-class-fallbacks\"", "self.pbts_forward_class_fallbacks = Fib.PbtsForwardClassFallbacks() self.pbts_forward_class_fallbacks.parent = self self._children_name_map[\"pbts_forward_class_fallbacks\"] = \"pbts-forward-class-fallbacks\" self.platform", "\"drop\") class FibPbtsForwardClass(Enum): \"\"\" FibPbtsForwardClass (Enum Class) Fib pbts forward", "value): self._perform_setattr(Fib.PbtsForwardClassFallbacks.PbtsForwardClassFallback, ['forward_class_number', 'fallback_type', 'fallback_class_number_array'], name, value) class Platform(Entity): \"\"\"", "= 'fib-common-cfg' _revision = '2017-05-01' def __init__(self): super(Fib.Platform.LabelSwitchedMulticast, self).__init__() self.yang_name", "Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64", "name, value): self._perform_setattr(Fib.Platform, [], name, value) class LabelSwitchedMulticast(Entity): \"\"\" Options", "= None self.encap_sharing_disable = None self.frr_follow_bgp_pic = None self.pbts_forward_class_fallbacks =", "recovering consistent\\-hashing state on interface up **type**\\: bool .. attribute::", "= OrderedDict([(\"label-switched-multicast\", (\"label_switched_multicast\", Fib.Platform.LabelSwitchedMulticast))]) self._leafs = OrderedDict() self.label_switched_multicast = Fib.Platform.LabelSwitchedMulticast()", "\"\"\" list = Enum.YLeaf(1, \"list\") any = Enum.YLeaf(2, \"any\") drop", "'2017-05-01' def __init__(self): super(Fib.PbtsForwardClassFallbacks, self).__init__() self.yang_name = \"pbts-forward-class-fallbacks\" self.yang_parent_name =", "self._child_classes = OrderedDict([(\"label-switched-multicast\", (\"label_switched_multicast\", Fib.Platform.LabelSwitchedMulticast))]) self._leafs = OrderedDict() self.label_switched_multicast =", "**mandatory**\\: True .. 
attribute:: fallback_class_number_array Set PBTS fallback class number", "'2017-05-01' def __init__(self): super(Fib.Platform, self).__init__() self.yang_name = \"platform\" self.yang_parent_name =", "None self.fallback_type = None self.fallback_class_number_array = [] self._segment_path = lambda:", "% self._segment_path() self._is_frozen = True def __setattr__(self, name, value): self._perform_setattr(Fib.Platform,", "attribute:: pbts_forward_class_fallback Set PBTS class for fallback **type**\\: list of", "'2017-05-01' def __init__(self): super(Fib.PbtsForwardClassFallbacks.PbtsForwardClassFallback, self).__init__() self.yang_name = \"pbts-forward-class-fallback\" self.yang_parent_name =", "def __init__(self): super(Fib, self).__init__() self._top_entity = None self.yang_name = \"fib\"", "**type**\\: :py:class:`FibPbtsForwardClass <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fib_common_cfg.FibPbtsForwardClass>` **type**\\: int **range:** 0..8 .. attribute:: fallback_type", "= \"fib\" self.is_top_level_class = False self.has_list_ancestor = False self.ylist_key_names =", "state on interface up **type**\\: bool .. attribute:: prefer_aib_routes Set", ".. data:: list = 1 Fallback to class number list", "['auto_hash_recover', 'prefer_aib_routes', 'encap_sharing_disable', 'frr_follow_bgp_pic'], name, value) class PbtsForwardClassFallbacks(Entity): \"\"\" PBTS", "**type**\\: :py:class:`FibPbtsFallback <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fib_common_cfg.FibPbtsFallback>` **mandatory**\\: True .. attribute:: fallback_class_number_array Set PBTS", "import handle_type_error as _handle_type_error class FibPbtsFallback(Enum): \"\"\" FibPbtsFallback (Enum Class)", "Fib(Entity): \"\"\" CEF configuration .. attribute:: pbts_forward_class_fallbacks PBTS class configuration", "= '2017-05-01' def __init__(self): super(Fib.Platform.LabelSwitchedMulticast, self).__init__() self.yang_name = \"label-switched-multicast\" self.yang_parent_name", "self._leafs = OrderedDict([ ('frr_holdtime', (YLeaf(YType.uint32, 'frr-holdtime'), ['int'])), ]) self.frr_holdtime =", "= Enum.YLeaf(8, \"any\") class Fib(Entity): \"\"\" CEF configuration .. attribute::", "\"pbts-forward-class-fallbacks\" self.platform = Fib.Platform() self.platform.parent = self self._children_name_map[\"platform\"] = \"platform\"", "YList, LeafDataList, Bits, Empty, Decimal64 from ydk.filters import YFilter from", "True def __setattr__(self, name, value): self._perform_setattr(Fib.PbtsForwardClassFallbacks.PbtsForwardClassFallback, ['forward_class_number', 'fallback_type', 'fallback_class_number_array'], name,", "name, value): self._perform_setattr(Fib, ['auto_hash_recover', 'prefer_aib_routes', 'encap_sharing_disable', 'frr_follow_bgp_pic'], name, value) class", "FibPbtsForwardClass (Enum Class) Fib pbts forward class .. data:: any", "**type**\\: bool .. attribute:: frr_follow_bgp_pic Set option for fast\\-reroute to", "bool .. 
attribute:: frr_follow_bgp_pic Set option for fast\\-reroute to follow", "self.has_list_ancestor = False self.ylist_key_names = ['forward_class_number'] self._child_classes = OrderedDict([]) self._leafs", "lambda: \"pbts-forward-class-fallbacks\" self._absolute_path = lambda: \"Cisco-IOS-XR-fib-common-cfg:fib/%s\" % self._segment_path() self._is_frozen =", "FRR **type**\\: int **range:** 3..180 **units**\\: second \"\"\" _prefix =", "= \"platform\" self.yang_parent_name = \"fib\" self.is_top_level_class = False self.has_list_ancestor =", "name, value): self._perform_setattr(Fib.PbtsForwardClassFallbacks.PbtsForwardClassFallback, ['forward_class_number', 'fallback_type', 'fallback_class_number_array'], name, value) class Platform(Entity):", "= [] self._child_classes = OrderedDict([(\"pbts-forward-class-fallbacks\", (\"pbts_forward_class_fallbacks\", Fib.PbtsForwardClassFallbacks)), (\"platform\", (\"platform\", Fib.Platform))])", "True .. attribute:: fallback_class_number_array Set PBTS fallback class number array", "self.encap_sharing_disable = None self.frr_follow_bgp_pic = None self.pbts_forward_class_fallbacks = Fib.PbtsForwardClassFallbacks() self.pbts_forward_class_fallbacks.parent", ".. attribute:: forward_class_number (key) PBTS forward class number **type**\\: union", "1 Fallback to class number list .. data:: any =", "= True self.has_list_ancestor = False self.ylist_key_names = [] self._child_classes =", "contains a collection of YANG definitions for Cisco IOS\\-XR fib\\-common", "'forward-class-number'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_fib_common_cfg', 'FibPbtsForwardClass', ''),'int'])), ('fallback_type', (YLeaf(YType.enumeration, 'fallback-type'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_fib_common_cfg', 'FibPbtsFallback', '')])),", "[] self._child_classes = OrderedDict([]) self._leafs = OrderedDict([ ('frr_holdtime', (YLeaf(YType.uint32, 'frr-holdtime'),", "= OrderedDict([]) self._leafs = OrderedDict([ ('forward_class_number', (YLeaf(YType.str, 'forward-class-number'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_fib_common_cfg', 'FibPbtsForwardClass',", "= ['forward_class_number'] self._child_classes = OrderedDict([]) self._leafs = OrderedDict([ ('forward_class_number', (YLeaf(YType.str,", "\"platform\" self._segment_path = lambda: \"Cisco-IOS-XR-fib-common-cfg:fib\" self._is_frozen = True def __setattr__(self,", "= OrderedDict([ ('forward_class_number', (YLeaf(YType.str, 'forward-class-number'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_fib_common_cfg', 'FibPbtsForwardClass', ''),'int'])), ('fallback_type', (YLeaf(YType.enumeration,", "OrderedDict([(\"label-switched-multicast\", (\"label_switched_multicast\", Fib.Platform.LabelSwitchedMulticast))]) self._leafs = OrderedDict() self.label_switched_multicast = Fib.Platform.LabelSwitchedMulticast() self.label_switched_multicast.parent", "class configuration .. attribute:: pbts_forward_class_fallback Set PBTS class for fallback", "automatcially recovering consistent\\-hashing state on interface up **type**\\: bool ..", ".. attribute:: frr_follow_bgp_pic Set option for fast\\-reroute to follow BGP", ":py:class:`PbtsForwardClassFallbacks <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fib_common_cfg.Fib.PbtsForwardClassFallbacks>` .. 
attribute:: platform FIB platform parameters **type**\\: :py:class:`Platform", "= [] self._child_classes = OrderedDict([(\"pbts-forward-class-fallback\", (\"pbts_forward_class_fallback\", Fib.PbtsForwardClassFallbacks.PbtsForwardClassFallback))]) self._leafs = OrderedDict()", "('frr_follow_bgp_pic', (YLeaf(YType.boolean, 'frr-follow-bgp-pic'), ['bool'])), ]) self.auto_hash_recover = None self.prefer_aib_routes =", "up **type**\\: bool .. attribute:: prefer_aib_routes Set options for adjacency", "def __setattr__(self, name, value): self._perform_setattr(Fib.Platform, [], name, value) class LabelSwitchedMulticast(Entity):", "= YList(self) self._segment_path = lambda: \"pbts-forward-class-fallbacks\" self._absolute_path = lambda: \"Cisco-IOS-XR-fib-common-cfg:fib/%s\"", "**type**\\: bool .. attribute:: prefer_aib_routes Set options for adjacency routes", "from ydk.errors import YError, YModelError from ydk.errors.error_handler import handle_type_error as", "\"pbts-forward-class-fallbacks\" self.is_top_level_class = False self.has_list_ancestor = False self.ylist_key_names = ['forward_class_number']", ".. data:: any = 8 Any class \"\"\" any =", "('frr_holdtime', (YLeaf(YType.uint32, 'frr-holdtime'), ['int'])), ]) self.frr_holdtime = None self._segment_path =", "_revision = '2017-05-01' def __init__(self): super(Fib.Platform.LabelSwitchedMulticast, self).__init__() self.yang_name = \"label-switched-multicast\"", "handle_type_error as _handle_type_error class FibPbtsFallback(Enum): \"\"\" FibPbtsFallback (Enum Class) Fib", "**type**\\: bool \"\"\" _prefix = 'fib-common-cfg' _revision = '2017-05-01' def", ".. attribute:: fallback_type Set PBTS fallback type **type**\\: :py:class:`FibPbtsFallback <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fib_common_cfg.FibPbtsFallback>`", "['bool'])), ('encap_sharing_disable', (YLeaf(YType.boolean, 'encap-sharing-disable'), ['bool'])), ('frr_follow_bgp_pic', (YLeaf(YType.boolean, 'frr-follow-bgp-pic'), ['bool'])), ])", "LabelSwitchedMulticast(Entity): \"\"\" Options for label\\-switched\\-multicast parameters .. attribute:: frr_holdtime Set", "value) class LabelSwitchedMulticast(Entity): \"\"\" Options for label\\-switched\\-multicast parameters .. attribute::", "False self.ylist_key_names = [] self._child_classes = OrderedDict([]) self._leafs = OrderedDict([", "management objects\\: fib\\: CEF configuration Copyright (c) 2013\\-2018 by Cisco", "= \"pbts-forward-class-fallbacks\" self.is_top_level_class = False self.has_list_ancestor = False self.ylist_key_names =", "**range:** 0..7 \"\"\" _prefix = 'fib-common-cfg' _revision = '2017-05-01' def", "YFilter from ydk.errors import YError, YModelError from ydk.errors.error_handler import handle_type_error", "PbtsForwardClassFallback(Entity): \"\"\" Set PBTS class for fallback .. attribute:: forward_class_number", "('forward_class_number', (YLeaf(YType.str, 'forward-class-number'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_fib_common_cfg', 'FibPbtsForwardClass', ''),'int'])), ('fallback_type', (YLeaf(YType.enumeration, 'fallback-type'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_fib_common_cfg',", "to wait for timeout **type**\\: bool \"\"\" _prefix = 'fib-common-cfg'", ".. attribute:: prefer_aib_routes Set options for adjacency routes overriding RIB", "configuration. 
This module contains definitions for the following management objects\\:", "(YLeaf(YType.uint32, 'frr-holdtime'), ['int'])), ]) self.frr_holdtime = None self._segment_path = lambda:", "Fib.PbtsForwardClassFallbacks.PbtsForwardClassFallback))]) self._leafs = OrderedDict() self.pbts_forward_class_fallback = YList(self) self._segment_path = lambda:", "% self._segment_path() self._is_frozen = True def __setattr__(self, name, value): self._perform_setattr(Fib.PbtsForwardClassFallbacks.PbtsForwardClassFallback,", "to follow BGP PIC update, not to wait for timeout", "**type**\\: :py:class:`Platform <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fib_common_cfg.Fib.Platform>` .. attribute:: auto_hash_recover Set option for automatcially", "label\\-switched\\-multicast parameters **type**\\: :py:class:`LabelSwitchedMulticast <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fib_common_cfg.Fib.Platform.LabelSwitchedMulticast>` \"\"\" _prefix = 'fib-common-cfg' _revision", "self.frr_follow_bgp_pic = None self.pbts_forward_class_fallbacks = Fib.PbtsForwardClassFallbacks() self.pbts_forward_class_fallbacks.parent = self self._children_name_map[\"pbts_forward_class_fallbacks\"]", "True def __setattr__(self, name, value): self._perform_setattr(Fib.Platform, [], name, value) class", "= 3 Fallback to drop \"\"\" list = Enum.YLeaf(1, \"list\")", "self.label_switched_multicast.parent = self self._children_name_map[\"label_switched_multicast\"] = \"label-switched-multicast\" self._segment_path = lambda: \"platform\"", "_prefix = 'fib-common-cfg' _revision = '2017-05-01' def __init__(self): super(Fib.PbtsForwardClassFallbacks, self).__init__()", "list of :py:class:`PbtsForwardClassFallback <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fib_common_cfg.Fib.PbtsForwardClassFallbacks.PbtsForwardClassFallback>` \"\"\" _prefix = 'fib-common-cfg' _revision =", "PBTS class for fallback **type**\\: list of :py:class:`PbtsForwardClassFallback <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fib_common_cfg.Fib.PbtsForwardClassFallbacks.PbtsForwardClassFallback>` \"\"\"", "(Enum Class) Fib pbts fallback .. data:: list = 1", "Any class \"\"\" any = Enum.YLeaf(8, \"any\") class Fib(Entity): \"\"\"", "= False self.ylist_key_names = [] self._child_classes = OrderedDict([(\"pbts-forward-class-fallback\", (\"pbts_forward_class_fallback\", Fib.PbtsForwardClassFallbacks.PbtsForwardClassFallback))])", "_revision = '2017-05-01' def __init__(self): super(Fib.PbtsForwardClassFallbacks, self).__init__() self.yang_name = \"pbts-forward-class-fallbacks\"", "import OrderedDict from ydk.types import Entity, EntityPath, Identity, Enum, YType,", "'prefer_aib_routes', 'encap_sharing_disable', 'frr_follow_bgp_pic'], name, value) class PbtsForwardClassFallbacks(Entity): \"\"\" PBTS class", ".. attribute:: pbts_forward_class_fallback Set PBTS class for fallback **type**\\: list", "= Enum.YLeaf(1, \"list\") any = Enum.YLeaf(2, \"any\") drop = Enum.YLeaf(3,", "for automatcially recovering consistent\\-hashing state on interface up **type**\\: bool" ]
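A generated model like the one above is consumed through YDK-Py's service layer. Below is a minimal usage sketch, assuming a reachable IOS-XR device; the address, port, and credentials are placeholders, not values from the source.

# Hedged usage sketch for the generated Fib model. CRUDService and
# NetconfServiceProvider are standard YDK-Py services; the device
# address and credentials below are illustrative placeholders.
from ydk.services import CRUDService
from ydk.providers import NetconfServiceProvider
from ydk.models.cisco_ios_xr import Cisco_IOS_XR_fib_common_cfg as xr_fib_cfg

provider = NetconfServiceProvider(address="192.0.2.1", port=830,
                                  username="admin", password="admin")
crud = CRUDService()

fib = xr_fib_cfg.Fib()
fib.frr_follow_bgp_pic = True  # boolean leaf: FRR follows BGP PIC update
fib.platform.label_switched_multicast.frr_holdtime = 60  # seconds, range 3..180

# Keyed list entry: fall back to any class for forward class 1.
fallback = xr_fib_cfg.Fib.PbtsForwardClassFallbacks.PbtsForwardClassFallback()
fallback.forward_class_number = 1
fallback.fallback_type = xr_fib_cfg.FibPbtsFallback.any
fib.pbts_forward_class_fallbacks.pbts_forward_class_fallback.append(fallback)

crud.create(provider, fib)  # push the whole fib subtree to the device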
"""actions that form a combo chain"""
from __future__ import annotations

from typing import Optional, Sequence, TYPE_CHECKING

from action import Action
from core.utility import Array
from core.constants import PlayerForm, SimActKind, MomentType
from core.database import FromDB

if TYPE_CHECKING:
    from entity.player import Player


class Combos:
    def __init__(self, player: Player, form: PlayerForm, act_ids: Sequence[int], ex_act_ids: Optional[Sequence[int]] = None) -> None:
        self.player = player
        self.actions: Array[Action] = Array()
        for idx, act_id in enumerate(act_ids):
            self.actions.append(Action(act_id, player, kind=SimActKind.COMBO, form=form, index=idx + 1))
        self.ex_actions = None
        if ex_act_ids:
            self.ex_actions: Array[Action] = Array()
            for idx, act_id in enumerate(ex_act_ids):
                if not act_id:
                    self.ex_actions.append(None)
                    continue
                self.ex_actions.append(Action(act_id, player, kind=SimActKind.COMBO, form=form, index=idx + 1))

    def next(self):
        if self.player.current in self.actions:
            try:
                return self.actions[self.player.current.index + 1]
            except IndexError:
                pass
        return self.actions[1]

    def __repr__(self) -> str:
        if self.ex_actions:
            return "->".join(map(repr, self.actions)) + "\tEX[" + "->".join(map(repr, self.ex_actions)) + "]"
        return "->".join(map(repr, self.actions))


class UniqueCombos(Combos, FromDB, table="CharaUniqueCombo"):
    def __init__(self, id: int, player: Player) -> None:
        FromDB.__init__(self, id)
        act_ids = (self._data["_ActionId"] + i for i in range(self._data["_MaxComboNum"]))
        ex_act_ids = None if not self._data["_ExActionId"] else (self._data["_ExActionId"] + i for i in range(self._data["_MaxComboNum"]))
        Combos.__init__(self, player, PlayerForm.ADV, act_ids, ex_act_ids=ex_act_ids)
        if self._data["_ShiftConditionType"] == 1:
            self.player.events.listen(MomentType.HIT, self.enable)

    def enable(self, *args, **kwargs):
        pass


class DefaultCombos(Combos, FromDB, table="WeaponType"):
    def __init__(self, id: int, player: Player) -> None:
        FromDB.__init__(self, id)
        act_ids = (self._data[f"_DefaultSkill{i+1:02}"] for i in range(5) if self._data[f"_DefaultSkill{i+1:02}"])
        ex_act_ids = None if not self._data["_DefaultSkill05Ex"] else (0, 0, 0, 0, self._data["_DefaultSkill05Ex"])
        Combos.__init__(self, player, PlayerForm.ADV, act_ids, ex_act_ids=ex_act_ids)


class DragonCombos(Combos):
    def __init__(self, id: int, combo_max: int, player: Player) -> None:
        act_ids = (id + i for i in range(combo_max))
        Combos.__init__(self, player, PlayerForm.DRG, act_ids)
+ i", "self.ex_actions.append(Action(act_id, player, kind=SimActKind.COMBO, form=form, index=idx + 1)) def next(self): if", "player: Player, form: PlayerForm, act_ids: Sequence[int], ex_act_ids: Optional[Sequence[int]] = None)", "in range(self._data[\"_MaxComboNum\"])) Combos.__init__(self, player, PlayerForm.ADV, act_ids, ex_act_ids=ex_act_ids) if self._data[\"_ShiftConditionType\"] ==", "if ex_act_ids: self.ex_actions: Array[Action] = Array() for idx, act_id in", "+ 1)) def next(self): if self.player.current in self.actions: try: return", "-> None: FromDB.__init__(self, id) act_ids = (self._data[\"_ActionId\"] + i for", "next(self): if self.player.current in self.actions: try: return self.actions[self.player.current.index + 1]", "FromDB.__init__(self, id) act_ids = (self._data[\"_ActionId\"] + i for i in", "Action from core.utility import Array from core.constants import PlayerForm, SimActKind,", "except IndexError: pass return self.actions[1] def __repr__(self) -> str: if", "if self.ex_actions: return \"->\".join(map(repr, self.actions)) + \"\\tEX[\" + \"->\".join(map(repr, self.ex_actions))", "if self._data[f\"_DefaultSkill{i+1:02}\"]) ex_act_ids = None if not self._data[\"_DefaultSkill05Ex\"] else (0,", "+ \"\\tEX[\" + \"->\".join(map(repr, self.ex_actions)) + \"]\" return \"->\".join(map(repr, self.actions))", "self.ex_actions = None if ex_act_ids: self.ex_actions: Array[Action] = Array() for", "i in range(self._data[\"_MaxComboNum\"])) Combos.__init__(self, player, PlayerForm.ADV, act_ids, ex_act_ids=ex_act_ids) if self._data[\"_ShiftConditionType\"]", "DefaultCombos(Combos, FromDB, table=\"WeaponType\"): def __init__(self, id: int, player: Player) ->", "class DefaultCombos(Combos, FromDB, table=\"WeaponType\"): def __init__(self, id: int, player: Player)", "__future__ import annotations from typing import Optional, Sequence, TYPE_CHECKING from", "__init__(self, player: Player, form: PlayerForm, act_ids: Sequence[int], ex_act_ids: Optional[Sequence[int]] =", "TYPE_CHECKING from action import Action from core.utility import Array from", "in enumerate(act_ids): self.actions.append(Action(act_id, player, kind=SimActKind.COMBO, form=form, index=idx + 1)) self.ex_actions", "int, player: Player) -> None: FromDB.__init__(self, id) act_ids = (self._data[\"_ActionId\"]", "= Array() for idx, act_id in enumerate(ex_act_ids): if not act_id:", "entity.player import Player class Combos: def __init__(self, player: Player, form:", "return self.actions[self.player.current.index + 1] except IndexError: pass return self.actions[1] def", "import annotations from typing import Optional, Sequence, TYPE_CHECKING from action", "core.database import FromDB if TYPE_CHECKING: from entity.player import Player class", "0, 0, self._data[\"_DefaultSkill05Ex\"]) Combos.__init__(self, player, PlayerForm.ADV, act_ids, ex_act_ids=ex_act_ids) class DragonCombos(Combos):", "Combos: def __init__(self, player: Player, form: PlayerForm, act_ids: Sequence[int], ex_act_ids:", "of actions that form a combo chain\"\"\" from __future__ import", "act_ids, ex_act_ids=ex_act_ids) class DragonCombos(Combos): def __init__(self, id: int, combo_max: int,", "form=form, index=idx + 1)) def next(self): if self.player.current in self.actions:", "from typing import Optional, Sequence, TYPE_CHECKING from action import Action", "not self._data[\"_ExActionId\"] else (self._data[\"_ExActionId\"] + i for i in range(self._data[\"_MaxComboNum\"]))", "Array() for idx, act_id in enumerate(act_ids): self.actions.append(Action(act_id, player, 
kind=SimActKind.COMBO, form=form,", "\"\"\"Series of actions that form a combo chain\"\"\" from __future__", "idx, act_id in enumerate(act_ids): self.actions.append(Action(act_id, player, kind=SimActKind.COMBO, form=form, index=idx +", "Array[Action] = Array() for idx, act_id in enumerate(ex_act_ids): if not", "*args, **kwargs): pass class DefaultCombos(Combos, FromDB, table=\"WeaponType\"): def __init__(self, id:", "__init__(self, id: int, combo_max: int, player: Player) -> None: act_ids", "import PlayerForm, SimActKind, MomentType from core.database import FromDB if TYPE_CHECKING:", "player self.actions: Array[Action] = Array() for idx, act_id in enumerate(act_ids):", "from core.utility import Array from core.constants import PlayerForm, SimActKind, MomentType", "(self._data[\"_ExActionId\"] + i for i in range(self._data[\"_MaxComboNum\"])) Combos.__init__(self, player, PlayerForm.ADV,", "self.actions.append(Action(act_id, player, kind=SimActKind.COMBO, form=form, index=idx + 1)) self.ex_actions = None", "\"->\".join(map(repr, self.actions)) class UniqueCombos(Combos, FromDB, table=\"CharaUniqueCombo\"): def __init__(self, id: int,", "0, self._data[\"_DefaultSkill05Ex\"]) Combos.__init__(self, player, PlayerForm.ADV, act_ids, ex_act_ids=ex_act_ids) class DragonCombos(Combos): def", "ex_act_ids=ex_act_ids) class DragonCombos(Combos): def __init__(self, id: int, combo_max: int, player:", "for i in range(5) if self._data[f\"_DefaultSkill{i+1:02}\"]) ex_act_ids = None if", "self._data[\"_ExActionId\"] else (self._data[\"_ExActionId\"] + i for i in range(self._data[\"_MaxComboNum\"])) Combos.__init__(self,", "for idx, act_id in enumerate(ex_act_ids): if not act_id: self.ex_actions.append(None) continue", "not self._data[\"_DefaultSkill05Ex\"] else (0, 0, 0, 0, self._data[\"_DefaultSkill05Ex\"]) Combos.__init__(self, player,", "self.player.events.listen(MomentType.HIT, self.enable) def enable(self, *args, **kwargs): pass class DefaultCombos(Combos, FromDB,", "for idx, act_id in enumerate(act_ids): self.actions.append(Action(act_id, player, kind=SimActKind.COMBO, form=form, index=idx", "**kwargs): pass class DefaultCombos(Combos, FromDB, table=\"WeaponType\"): def __init__(self, id: int,", "for i in range(self._data[\"_MaxComboNum\"])) Combos.__init__(self, player, PlayerForm.ADV, act_ids, ex_act_ids=ex_act_ids) if", "in range(5) if self._data[f\"_DefaultSkill{i+1:02}\"]) ex_act_ids = None if not self._data[\"_DefaultSkill05Ex\"]", "combo_max: int, player: Player) -> None: act_ids = (id +", "act_id: self.ex_actions.append(None) continue self.ex_actions.append(Action(act_id, player, kind=SimActKind.COMBO, form=form, index=idx + 1))", "if self.player.current in self.actions: try: return self.actions[self.player.current.index + 1] except", "int, combo_max: int, player: Player) -> None: act_ids = (id", "id) act_ids = (self._data[f\"_DefaultSkill{i+1:02}\"] for i in range(5) if self._data[f\"_DefaultSkill{i+1:02}\"])", "import Array from core.constants import PlayerForm, SimActKind, MomentType from core.database", "range(5) if self._data[f\"_DefaultSkill{i+1:02}\"]) ex_act_ids = None if not self._data[\"_DefaultSkill05Ex\"] else", "from core.constants import PlayerForm, SimActKind, MomentType from core.database import FromDB", "def __init__(self, id: int, player: Player) -> None: FromDB.__init__(self, id)", "(self._data[f\"_DefaultSkill{i+1:02}\"] for i in range(5) if self._data[f\"_DefaultSkill{i+1:02}\"]) ex_act_ids = None", "player: Player) -> None: act_ids = (id + i for", "else (0, 0, 0, 0, 
self._data[\"_DefaultSkill05Ex\"]) Combos.__init__(self, player, PlayerForm.ADV, act_ids,", "try: return self.actions[self.player.current.index + 1] except IndexError: pass return self.actions[1]", "+ i for i in range(self._data[\"_MaxComboNum\"])) Combos.__init__(self, player, PlayerForm.ADV, act_ids,", "-> None: FromDB.__init__(self, id) act_ids = (self._data[f\"_DefaultSkill{i+1:02}\"] for i in", "Optional, Sequence, TYPE_CHECKING from action import Action from core.utility import", "self.ex_actions: Array[Action] = Array() for idx, act_id in enumerate(ex_act_ids): if", "range(self._data[\"_MaxComboNum\"])) ex_act_ids = None if not self._data[\"_ExActionId\"] else (self._data[\"_ExActionId\"] +", "from action import Action from core.utility import Array from core.constants", "= None if not self._data[\"_DefaultSkill05Ex\"] else (0, 0, 0, 0,", "Player) -> None: act_ids = (id + i for i", "ex_act_ids: Optional[Sequence[int]] = None) -> None: self.player = player self.actions:", "None: act_ids = (id + i for i in range(combo_max))", "id: int, combo_max: int, player: Player) -> None: act_ids =", "1] except IndexError: pass return self.actions[1] def __repr__(self) -> str:", "self.actions: Array[Action] = Array() for idx, act_id in enumerate(act_ids): self.actions.append(Action(act_id,", "index=idx + 1)) def next(self): if self.player.current in self.actions: try:", "UniqueCombos(Combos, FromDB, table=\"CharaUniqueCombo\"): def __init__(self, id: int, player: Player) ->", "in self.actions: try: return self.actions[self.player.current.index + 1] except IndexError: pass", "act_ids = (self._data[\"_ActionId\"] + i for i in range(self._data[\"_MaxComboNum\"])) ex_act_ids", "+ i for i in range(self._data[\"_MaxComboNum\"])) ex_act_ids = None if", "def enable(self, *args, **kwargs): pass class DefaultCombos(Combos, FromDB, table=\"WeaponType\"): def", "table=\"WeaponType\"): def __init__(self, id: int, player: Player) -> None: FromDB.__init__(self,", "i for i in range(self._data[\"_MaxComboNum\"])) Combos.__init__(self, player, PlayerForm.ADV, act_ids, ex_act_ids=ex_act_ids)", "player: Player) -> None: FromDB.__init__(self, id) act_ids = (self._data[f\"_DefaultSkill{i+1:02}\"] for", "SimActKind, MomentType from core.database import FromDB if TYPE_CHECKING: from entity.player", "def __init__(self, id: int, combo_max: int, player: Player) -> None:", "self._data[\"_DefaultSkill05Ex\"]) Combos.__init__(self, player, PlayerForm.ADV, act_ids, ex_act_ids=ex_act_ids) class DragonCombos(Combos): def __init__(self,", "Combos.__init__(self, player, PlayerForm.ADV, act_ids, ex_act_ids=ex_act_ids) if self._data[\"_ShiftConditionType\"] == 1: self.player.events.listen(MomentType.HIT,", "core.constants import PlayerForm, SimActKind, MomentType from core.database import FromDB if", "Combos.__init__(self, player, PlayerForm.ADV, act_ids, ex_act_ids=ex_act_ids) class DragonCombos(Combos): def __init__(self, id:", "Player, form: PlayerForm, act_ids: Sequence[int], ex_act_ids: Optional[Sequence[int]] = None) ->", "Array[Action] = Array() for idx, act_id in enumerate(act_ids): self.actions.append(Action(act_id, player,", "str: if self.ex_actions: return \"->\".join(map(repr, self.actions)) + \"\\tEX[\" + \"->\".join(map(repr,", "FromDB, table=\"WeaponType\"): def __init__(self, id: int, player: Player) -> None:", "__init__(self, id: int, player: Player) -> None: FromDB.__init__(self, id) act_ids", "range(self._data[\"_MaxComboNum\"])) Combos.__init__(self, player, PlayerForm.ADV, act_ids, ex_act_ids=ex_act_ids) if 
self._data[\"_ShiftConditionType\"] == 1:", "action import Action from core.utility import Array from core.constants import", "Array() for idx, act_id in enumerate(ex_act_ids): if not act_id: self.ex_actions.append(None)", "\"]\" return \"->\".join(map(repr, self.actions)) class UniqueCombos(Combos, FromDB, table=\"CharaUniqueCombo\"): def __init__(self,", "1)) self.ex_actions = None if ex_act_ids: self.ex_actions: Array[Action] = Array()", "self.actions[self.player.current.index + 1] except IndexError: pass return self.actions[1] def __repr__(self)", "player, PlayerForm.ADV, act_ids, ex_act_ids=ex_act_ids) if self._data[\"_ShiftConditionType\"] == 1: self.player.events.listen(MomentType.HIT, self.enable)", "for i in range(self._data[\"_MaxComboNum\"])) ex_act_ids = None if not self._data[\"_ExActionId\"]", "+ 1)) self.ex_actions = None if ex_act_ids: self.ex_actions: Array[Action] =", "-> None: self.player = player self.actions: Array[Action] = Array() for", "ex_act_ids=ex_act_ids) if self._data[\"_ShiftConditionType\"] == 1: self.player.events.listen(MomentType.HIT, self.enable) def enable(self, *args,", "1: self.player.events.listen(MomentType.HIT, self.enable) def enable(self, *args, **kwargs): pass class DefaultCombos(Combos,", "self.player = player self.actions: Array[Action] = Array() for idx, act_id", "PlayerForm.ADV, act_ids, ex_act_ids=ex_act_ids) if self._data[\"_ShiftConditionType\"] == 1: self.player.events.listen(MomentType.HIT, self.enable) def", "+ \"->\".join(map(repr, self.ex_actions)) + \"]\" return \"->\".join(map(repr, self.actions)) class UniqueCombos(Combos,", "self.enable) def enable(self, *args, **kwargs): pass class DefaultCombos(Combos, FromDB, table=\"WeaponType\"):", "PlayerForm, SimActKind, MomentType from core.database import FromDB if TYPE_CHECKING: from", "= None if not self._data[\"_ExActionId\"] else (self._data[\"_ExActionId\"] + i for", "self.actions[1] def __repr__(self) -> str: if self.ex_actions: return \"->\".join(map(repr, self.actions))", "Player) -> None: FromDB.__init__(self, id) act_ids = (self._data[f\"_DefaultSkill{i+1:02}\"] for i", "enumerate(ex_act_ids): if not act_id: self.ex_actions.append(None) continue self.ex_actions.append(Action(act_id, player, kind=SimActKind.COMBO, form=form,", "FromDB if TYPE_CHECKING: from entity.player import Player class Combos: def", "Sequence[int], ex_act_ids: Optional[Sequence[int]] = None) -> None: self.player = player", "return \"->\".join(map(repr, self.actions)) class UniqueCombos(Combos, FromDB, table=\"CharaUniqueCombo\"): def __init__(self, id:", "kind=SimActKind.COMBO, form=form, index=idx + 1)) def next(self): if self.player.current in", "PlayerForm.ADV, act_ids, ex_act_ids=ex_act_ids) class DragonCombos(Combos): def __init__(self, id: int, combo_max:", "None) -> None: self.player = player self.actions: Array[Action] = Array()", "i in range(5) if self._data[f\"_DefaultSkill{i+1:02}\"]) ex_act_ids = None if not", "self._data[\"_DefaultSkill05Ex\"] else (0, 0, 0, 0, self._data[\"_DefaultSkill05Ex\"]) Combos.__init__(self, player, PlayerForm.ADV,", "__repr__(self) -> str: if self.ex_actions: return \"->\".join(map(repr, self.actions)) + \"\\tEX[\"", "self._data[\"_ShiftConditionType\"] == 1: self.player.events.listen(MomentType.HIT, self.enable) def enable(self, *args, **kwargs): pass", "if not self._data[\"_DefaultSkill05Ex\"] else (0, 0, 0, 0, self._data[\"_DefaultSkill05Ex\"]) Combos.__init__(self,", "enable(self, *args, **kwargs): pass class DefaultCombos(Combos, FromDB, table=\"WeaponType\"): def 
__init__(self,", "def __repr__(self) -> str: if self.ex_actions: return \"->\".join(map(repr, self.actions)) +" ]
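The next() method above relies on core.utility.Array being 1-indexed: combo steps are appended with index=idx + 1, and an exhausted chain wraps back to self.actions[1] rather than [0]. Below is a minimal standalone sketch of that advance-and-wrap logic; OneIndexedList and next_combo are hypothetical stand-ins for illustration, not part of the codebase.

from typing import Optional


class OneIndexedList(list):
    """Hypothetical stand-in: a list whose positional access starts at 1,
    mirroring how the code above uses Array."""

    def __getitem__(self, index: int):
        if index < 1:
            raise IndexError("1-indexed: valid indices start at 1")
        return super().__getitem__(index - 1)


def next_combo(actions: OneIndexedList, current_index: Optional[int]):
    """Return the action following current_index, or restart the chain."""
    if current_index is not None:
        try:
            return actions[current_index + 1]
        except IndexError:
            pass  # past the last combo step: wrap back to the start
    return actions[1]


combo = OneIndexedList(["C1", "C2", "C3"])
assert next_combo(combo, None) == "C1"  # no combo in progress -> first step
assert next_combo(combo, 1) == "C2"     # advance one step
assert next_combo(combo, 3) == "C1"     # chain exhausted -> restart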
[ "A :class:`~flask_unchained.bundles.sqlalchemy.SQLAlchemy` extension instance. \"\"\" SESSION_SQLALCHEMY_TABLE = 'flask_sessions' \"\"\" The", "only be attached to requests if those requests are \"same-site\".", "\"\"\" Set this if you need to customize the :class:`~flask_unchained.bundles.sqlalchemy.BaseModel`", "True \"\"\" Controls if the cookie should be set with", "a permanent session will be refreshed each request and get", "None \"\"\" Set this if you need to customize the", "if that is not set for '/'. Defaults to ``None``.", "\"\"\" A :class:`pymongo.MongoClient` instance. By default, connect to ``127.0.0.1:27017``. \"\"\"", "session will be refreshed each request and get their lifetime", "timedelta(days=31) \"\"\" The lifetime of a permanent session as ``datetime.timedelta``", "security. Defaults to ``True``. \"\"\" SESSION_COOKIE_SECURE = False \"\"\" Controls", "Controls if the cookie should be set with the ``httponly``", "closes. Defaults to ``True``. \"\"\" class Config(_DefaultFlaskConfigForSessions): \"\"\" Default configuration", "- ``'redis'``: :class:`~flask_unchained.bundles.session.session_interfaces.RedisSessionInterface` - ``'memcached'``: :class:`~flask_unchained.bundles.session.session_interfaces.MemcachedSessionInterface` - ``'filesystem'``: :class:`~flask_unchained.bundles.session.session_interfaces.FileSystemSessionInterface` -", "the session actually modifies. Non permanent sessions are not affected", "session actually modifies. Non permanent sessions are not affected by", "``True``. \"\"\" SESSION_COOKIE_SECURE = False \"\"\" Controls if the cookie", "Defaults to ``True``. \"\"\" SESSION_COOKIE_SECURE = False \"\"\" Controls if", "``'Lax'`` (recommended) or ``'Strict'``. Defaults to ``None``. \"\"\" SESSION_REFRESH_EACH_REQUEST =", "- ``'mongodb'``: :class:`~flask_unchained.bundles.session.session_interfaces.MongoDBSessionInterface` - ``'sqlalchemy'``: :class:`~flask_unchained.bundles.session.session_interfaces.SqlAlchemySessionInterface` Defaults to ``'null'``. \"\"\"", "or an integer representing seconds. Defaults to 31 days. \"\"\"", "cookie such that it will only be attached to requests", ":class:`~flask_unchained.bundles.sqlalchemy.BaseModel` subclass used for storing sessions in the database. \"\"\"", "be set to ``'Lax'`` (recommended) or ``'Strict'``. Defaults to ``None``.", "\"\"\" The name of the SQL table you want to", "db \"\"\" A :class:`~flask_unchained.bundles.sqlalchemy.SQLAlchemy` extension instance. \"\"\" SESSION_SQLALCHEMY_TABLE = 'flask_sessions'", "this is not set the cookie will be valid for", "The name of the session cookie. Defaults to ``'session'``. \"\"\"", "eg ``0o600``. Defaults to ``0o600``. \"\"\" SESSION_MONGODB = None \"\"\"", "requests if those requests are \"same-site\". Can be set to", "will always expire if the browser window closes. Defaults to", "A :class:`memcached.Client` instance. By default, connect to ``127.0.0.1:11211``. \"\"\" SESSION_FILE_DIR", "stores before it starts deleting some. Defaults to 500. \"\"\"", "must be served over HTTPS for this to make sense.", "\"\"\" SESSION_COOKIE_SAMESITE = None \"\"\" Restrict how cookies are sent", "the browser window closes. Defaults to ``True``. \"\"\" class Config(_DefaultFlaskConfigForSessions):", "Defaults to ``flask_sessions``. \"\"\" SESSION_SQLALCHEMY_MODEL = None \"\"\" Set this", "\"\"\" SESSION_FILE_THRESHOLD = 500 \"\"\" The maximum number of items", "for the session cookie. 
If this is not set the", "the :class:`~flask_unchained.bundles.sqlalchemy.BaseModel` subclass used for storing sessions in the database.", "is marked ``secure``. The application must be served over HTTPS", "if the cookie should be set with the ``httponly`` flag.", "(recommended) or ``'Strict'``. Defaults to ``None``. \"\"\" SESSION_REFRESH_EACH_REQUEST = True", "SESSION_COOKIE_NAME = 'session' \"\"\" The name of the session cookie.", "the session cookie. If this is not set the cookie", "only be modified if the session actually modifies. Non permanent", "actually modifies. Non permanent sessions are not affected by this", "only send cookies with requests over HTTPS if the cookie", "not. Defaults to ``True``. \"\"\" SESSION_USE_SIGNER = False \"\"\" Whether", "'sessions' \"\"\" The MongoDB collection you want to use. Defaults", "connect to ``127.0.0.1:11211``. \"\"\" SESSION_FILE_DIR = os.path.join(os.getcwd(), 'flask_sessions') \"\"\" The", "behavior. If set to ``True`` a permanent session will be", "= 'flask_session' \"\"\" The MongoDB database you want to use.", "their lifetime extended, if set to ``False`` it will only", "\"\"\" The MongoDB database you want to use. Defaults to", "SESSION_SQLALCHEMY_TABLE = 'flask_sessions' \"\"\" The name of the SQL table", "sent with requests from external sites. Limits the scope of", "Controls the set-cookie behavior. If set to ``True`` a permanent", "= 0o600 \"\"\" The file mode wanted for the session", "= 'session:' \"\"\" A prefix that is added before all", "The name of the SQL table you want to use.", "the session stores before it starts deleting some. Defaults to", "os.path.join(os.getcwd(), 'flask_sessions') \"\"\" The folder where session files are stored.", "will be valid for all subdomains of ``SERVER_NAME``. Defaults to", "number of items the session stores before it starts deleting", "\"\"\" SESSION_COOKIE_DOMAIN = None \"\"\" The domain for the session", "to use. Defaults to ``flask_sessions``. \"\"\" SESSION_SQLALCHEMY_MODEL = None \"\"\"", "sign the session cookie sid or not. If set to", "those requests are \"same-site\". Can be set to ``'Lax'`` (recommended)", "``False`` it will only be modified if the session actually", "SESSION_USE_SIGNER = False \"\"\" Whether sign the session cookie sid", ":class:`~flask_unchained.bundles.session.session_interfaces.NullSessionInterface` (default) - ``'redis'``: :class:`~flask_unchained.bundles.session.session_interfaces.RedisSessionInterface` - ``'memcached'``: :class:`~flask_unchained.bundles.session.session_interfaces.MemcachedSessionInterface` - ``'filesystem'``:", "MongoDB collection you want to use. Defaults to ``'sessions'``. \"\"\"", "is not set the cookie will be valid for all", "By default, connect to ``127.0.0.1:27017``. \"\"\" SESSION_MONGODB_DB = 'flask_session' \"\"\"", "be set with the ``httponly`` flag. Browsers will not allow", "flask_unchained.bundles.sqlalchemy import db except ImportError: db = None class _DefaultFlaskConfigForSessions(BundleConfig):", "\"\"\" PERMANENT_SESSION_LIFETIME = timedelta(days=31) \"\"\" The lifetime of a permanent", "``flask_sessions``. \"\"\" SESSION_SQLALCHEMY_MODEL = None \"\"\" Set this if you", "starts deleting some. Defaults to 500. \"\"\" SESSION_FILE_MODE = 0o600", "folder named ``flask_sessions`` in your current working directory. \"\"\" SESSION_FILE_THRESHOLD", "\"\"\" SESSION_REDIS = None \"\"\" A :class:`redis.Redis` instance. By default,", "be modified if the session actually modifies. 
Non permanent sessions", "If this is not set, the cookie will be valid", "\"same-site\". Can be set to ``'Lax'`` (recommended) or ``'Strict'``. Defaults", "Defaults to ``None``. \"\"\" SESSION_COOKIE_HTTPONLY = True \"\"\" Controls if", "if the browser window closes. Defaults to ``True``. \"\"\" class", "you have to set ``SECRET_KEY``. Defaults to ``False``. \"\"\" SESSION_KEY_PREFIX", "\"\"\" Specifies which type of session interface to use. Built-in", "session cookie. If this is not set, the cookie will", "collection you want to use. Defaults to ``'sessions'``. \"\"\" SESSION_SQLALCHEMY", "to use. Defaults to ``'sessions'``. \"\"\" SESSION_SQLALCHEMY = db \"\"\"", "``'filesystem'``: :class:`~flask_unchained.bundles.session.session_interfaces.FileSystemSessionInterface` - ``'mongodb'``: :class:`~flask_unchained.bundles.session.session_interfaces.MongoDBSessionInterface` - ``'sqlalchemy'``: :class:`~flask_unchained.bundles.session.session_interfaces.SqlAlchemySessionInterface` Defaults to", "JavaScript access to cookies marked as ``httponly`` for security. Defaults", "instance. By default, connect to ``127.0.0.1:11211``. \"\"\" SESSION_FILE_DIR = os.path.join(os.getcwd(),", "\"\"\" SESSION_MONGODB = None \"\"\" A :class:`pymongo.MongoClient` instance. By default,", "to cookies marked as ``httponly`` for security. Defaults to ``True``.", "By default, connect to ``127.0.0.1:11211``. \"\"\" SESSION_FILE_DIR = os.path.join(os.getcwd(), 'flask_sessions')", "be valid for all subdomains of ``SERVER_NAME``. Defaults to ``None``.", "SESSION_MONGODB = None \"\"\" A :class:`pymongo.MongoClient` instance. By default, connect", "lifetime extended, if set to ``False`` it will only be", "``True``, you have to set ``SECRET_KEY``. Defaults to ``False``. \"\"\"", "be valid for all of ``APPLICATION_ROOT`` or if that is", "= False \"\"\" Whether sign the session cookie sid or", "not set, the cookie will be valid for all subdomains", "SESSION_PERMANENT = True \"\"\" Whether use permanent session or not.", "``127.0.0.1:27017``. \"\"\" SESSION_MONGODB_DB = 'flask_session' \"\"\" The MongoDB database you", "try: from flask_unchained.bundles.sqlalchemy import db except ImportError: db = None", "= 'sessions' \"\"\" The MongoDB collection you want to use.", "'flask_session' \"\"\" The MongoDB database you want to use. Defaults", "the ``secure`` flag. Browsers will only send cookies with requests", "external sites. Limits the scope of the cookie such that", "permanent session as ``datetime.timedelta`` object or an integer representing seconds.", "application must be served over HTTPS for this to make", "to ``'flask_session'``. \"\"\" SESSION_MONGODB_COLLECT = 'sessions' \"\"\" The MongoDB collection", "cookies are sent with requests from external sites. Limits the", "instance. \"\"\" SESSION_SQLALCHEMY_TABLE = 'flask_sessions' \"\"\" The name of the", "SESSION_REFRESH_EACH_REQUEST = True \"\"\" Controls the set-cookie behavior. If set", "to use. Defaults to ``'flask_session'``. \"\"\" SESSION_MONGODB_COLLECT = 'sessions' \"\"\"", "have to set ``SECRET_KEY``. Defaults to ``False``. \"\"\" SESSION_KEY_PREFIX =", "types: - ``'null'``: :class:`~flask_unchained.bundles.session.session_interfaces.NullSessionInterface` (default) - ``'redis'``: :class:`~flask_unchained.bundles.session.session_interfaces.RedisSessionInterface` - ``'memcached'``:", "_DefaultFlaskConfigForSessions(BundleConfig): SESSION_COOKIE_NAME = 'session' \"\"\" The name of the session", "options for the Session Bundle. 
\"\"\" SESSION_TYPE = 'null' \"\"\"", "valid for all of ``APPLICATION_ROOT`` or if that is not", "``'null'``: :class:`~flask_unchained.bundles.session.session_interfaces.NullSessionInterface` (default) - ``'redis'``: :class:`~flask_unchained.bundles.session.session_interfaces.RedisSessionInterface` - ``'memcached'``: :class:`~flask_unchained.bundles.session.session_interfaces.MemcachedSessionInterface` -", "as ``httponly`` for security. Defaults to ``True``. \"\"\" SESSION_COOKIE_SECURE =", "\"\"\" SESSION_FILE_DIR = os.path.join(os.getcwd(), 'flask_sessions') \"\"\" The folder where session", "will only send cookies with requests over HTTPS if the", "it will only be modified if the session actually modifies.", "session as ``datetime.timedelta`` object or an integer representing seconds. Defaults", "requests over HTTPS if the cookie is marked ``secure``. The", "``'null'``. \"\"\" SESSION_PERMANENT = True \"\"\" Whether use permanent session", "\"\"\" SESSION_PERMANENT = True \"\"\" Whether use permanent session or", "the same backend storage server for different apps. Defaults to", "If set to ``True`` a permanent session will be refreshed", "= None \"\"\" A :class:`memcached.Client` instance. By default, connect to", "permanent session will be refreshed each request and get their", "maximum number of items the session stores before it starts", "SESSION_MONGODB_DB = 'flask_session' \"\"\" The MongoDB database you want to", "server for different apps. Defaults to ``'session:'``. \"\"\" SESSION_REDIS =", "always expire if the browser window closes. Defaults to ``True``.", "Browsers will only send cookies with requests over HTTPS if", "PERMANENT_SESSION_LIFETIME = timedelta(days=31) \"\"\" The lifetime of a permanent session", "\"\"\" SESSION_SQLALCHEMY_MODEL = None \"\"\" Set this if you need", "set-cookie behavior. If set to ``True`` a permanent session will", "Defaults to ``'sessions'``. \"\"\" SESSION_SQLALCHEMY = db \"\"\" A :class:`~flask_unchained.bundles.sqlalchemy.SQLAlchemy`", "and will always expire if the browser window closes. Defaults", "backend storage server for different apps. Defaults to ``'session:'``. \"\"\"", "not. If set to ``True``, you have to set ``SECRET_KEY``.", "session interface to use. Built-in session types: - ``'null'``: :class:`~flask_unchained.bundles.session.session_interfaces.NullSessionInterface`", "Defaults to ``None``. \"\"\" SESSION_REFRESH_EACH_REQUEST = True \"\"\" Controls the", "to ``True``, you have to set ``SECRET_KEY``. Defaults to ``False``.", "from flask_unchained.bundles.sqlalchemy import db except ImportError: db = None class", "session types: - ``'null'``: :class:`~flask_unchained.bundles.session.session_interfaces.NullSessionInterface` (default) - ``'redis'``: :class:`~flask_unchained.bundles.session.session_interfaces.RedisSessionInterface` -", "``'session'``. \"\"\" SESSION_COOKIE_DOMAIN = None \"\"\" The domain for the", "cookie should be set with the ``httponly`` flag. Browsers will", "all of ``APPLICATION_ROOT`` or if that is not set for", "Defaults to ``False``. \"\"\" SESSION_KEY_PREFIX = 'session:' \"\"\" A prefix", "customize the :class:`~flask_unchained.bundles.sqlalchemy.BaseModel` subclass used for storing sessions in the", "over HTTPS for this to make sense. Defaults to ``False``.", "``'Strict'``. Defaults to ``None``. \"\"\" SESSION_REFRESH_EACH_REQUEST = True \"\"\" Controls", "The maximum number of items the session stores before it", "make sense. Defaults to ``False``. 
\"\"\" PERMANENT_SESSION_LIFETIME = timedelta(days=31) \"\"\"", "will be valid for all of ``APPLICATION_ROOT`` or if that", "None \"\"\" The path for the session cookie. If this", "Defaults to ``'null'``. \"\"\" SESSION_PERMANENT = True \"\"\" Whether use", "will only be modified if the session actually modifies. Non", "session cookie. Defaults to ``'session'``. \"\"\" SESSION_COOKIE_DOMAIN = None \"\"\"", "\"\"\" class Config(_DefaultFlaskConfigForSessions): \"\"\" Default configuration options for the Session", "set, the cookie will be valid for all subdomains of", "sid or not. If set to ``True``, you have to", "except ImportError: db = None class _DefaultFlaskConfigForSessions(BundleConfig): SESSION_COOKIE_NAME = 'session'", "type of session interface to use. Built-in session types: -", "be attached to requests if those requests are \"same-site\". Can", "``0o600``. Defaults to ``0o600``. \"\"\" SESSION_MONGODB = None \"\"\" A", "using a folder named ``flask_sessions`` in your current working directory.", "\"\"\" The name of the session cookie. Defaults to ``'session'``.", "\"\"\" SESSION_FILE_MODE = 0o600 \"\"\" The file mode wanted for", "to using a folder named ``flask_sessions`` in your current working", "set for '/'. Defaults to ``None``. \"\"\" SESSION_COOKIE_HTTPONLY = True", "``'flask_session'``. \"\"\" SESSION_MONGODB_COLLECT = 'sessions' \"\"\" The MongoDB collection you", "\"\"\" SESSION_MONGODB_COLLECT = 'sessions' \"\"\" The MongoDB collection you want", "the cookie is marked ``secure``. The application must be served", "your current working directory. \"\"\" SESSION_FILE_THRESHOLD = 500 \"\"\" The", "= None \"\"\" Set this if you need to customize", "where session files are stored. Defaults to using a folder", "``SERVER_NAME``. Defaults to ``None``. \"\"\" SESSION_COOKIE_PATH = None \"\"\" The", "set to ``'Lax'`` (recommended) or ``'Strict'``. Defaults to ``None``. \"\"\"", "- ``'sqlalchemy'``: :class:`~flask_unchained.bundles.session.session_interfaces.SqlAlchemySessionInterface` Defaults to ``'null'``. \"\"\" SESSION_PERMANENT = True", "HTTPS for this to make sense. Defaults to ``False``. \"\"\"", "the session files. Should be specified as an octal, eg", "the session cookie. Defaults to ``'session'``. \"\"\" SESSION_COOKIE_DOMAIN = None", "expire if the browser window closes. Defaults to ``True``. \"\"\"", "\"\"\" Controls the set-cookie behavior. If set to ``True`` a", "Built-in session types: - ``'null'``: :class:`~flask_unchained.bundles.session.session_interfaces.NullSessionInterface` (default) - ``'redis'``: :class:`~flask_unchained.bundles.session.session_interfaces.RedisSessionInterface`", "with the ``httponly`` flag. Browsers will not allow JavaScript access", "use. Defaults to ``'sessions'``. \"\"\" SESSION_SQLALCHEMY = db \"\"\" A", "\"\"\" A :class:`~flask_unchained.bundles.sqlalchemy.SQLAlchemy` extension instance. \"\"\" SESSION_SQLALCHEMY_TABLE = 'flask_sessions' \"\"\"", "before it starts deleting some. Defaults to 500. \"\"\" SESSION_FILE_MODE", "are stored. Defaults to using a folder named ``flask_sessions`` in", "import db except ImportError: db = None class _DefaultFlaskConfigForSessions(BundleConfig): SESSION_COOKIE_NAME", "This makes it possible to use the same backend storage", "be refreshed each request and get their lifetime extended, if", "integer representing seconds. Defaults to 31 days. \"\"\" SESSION_COOKIE_SAMESITE =", "``0o600``. \"\"\" SESSION_MONGODB = None \"\"\" A :class:`pymongo.MongoClient` instance. By", "you want to use. 
Defaults to ``flask_sessions``. \"\"\" SESSION_SQLALCHEMY_MODEL =", "attached to requests if those requests are \"same-site\". Can be", "should be set with the ``httponly`` flag. Browsers will not", "affected by this and will always expire if the browser", "to ``flask_sessions``. \"\"\" SESSION_SQLALCHEMY_MODEL = None \"\"\" Set this if", "of a permanent session as ``datetime.timedelta`` object or an integer", "Default configuration options for the Session Bundle. \"\"\" SESSION_TYPE =", "Can be set to ``'Lax'`` (recommended) or ``'Strict'``. Defaults to", "send cookies with requests over HTTPS if the cookie is", "By default, connect to ``127.0.0.1:6379``. \"\"\" SESSION_MEMCACHED = None \"\"\"", "to ``True``. \"\"\" SESSION_USE_SIGNER = False \"\"\" Whether sign the", "this to make sense. Defaults to ``False``. \"\"\" PERMANENT_SESSION_LIFETIME =", "\"\"\" SESSION_COOKIE_HTTPONLY = True \"\"\" Controls if the cookie should", "``httponly`` for security. Defaults to ``True``. \"\"\" SESSION_COOKIE_SECURE = False", "Defaults to ``True``. \"\"\" SESSION_USE_SIGNER = False \"\"\" Whether sign", "working directory. \"\"\" SESSION_FILE_THRESHOLD = 500 \"\"\" The maximum number", "table you want to use. Defaults to ``flask_sessions``. \"\"\" SESSION_SQLALCHEMY_MODEL", "name of the SQL table you want to use. Defaults", "\"\"\" The file mode wanted for the session files. Should", "the cookie should be set with the ``secure`` flag. Browsers", "The MongoDB database you want to use. Defaults to ``'flask_session'``.", "use. Defaults to ``'flask_session'``. \"\"\" SESSION_MONGODB_COLLECT = 'sessions' \"\"\" The", "= 'session' \"\"\" The name of the session cookie. Defaults", "are not affected by this and will always expire if", "\"\"\" SESSION_KEY_PREFIX = 'session:' \"\"\" A prefix that is added", "session keys. This makes it possible to use the same", "to 31 days. \"\"\" SESSION_COOKIE_SAMESITE = None \"\"\" Restrict how", "None \"\"\" A :class:`pymongo.MongoClient` instance. By default, connect to ``127.0.0.1:27017``.", "= timedelta(days=31) \"\"\" The lifetime of a permanent session as", "The folder where session files are stored. Defaults to using", "of ``APPLICATION_ROOT`` or if that is not set for '/'.", "set with the ``httponly`` flag. Browsers will not allow JavaScript", "If set to ``True``, you have to set ``SECRET_KEY``. Defaults", "deleting some. Defaults to 500. \"\"\" SESSION_FILE_MODE = 0o600 \"\"\"", "= False \"\"\" Controls if the cookie should be set", "mode wanted for the session files. Should be specified as", "to ``None``. \"\"\" SESSION_COOKIE_PATH = None \"\"\" The path for", "'/'. Defaults to ``None``. \"\"\" SESSION_COOKIE_HTTPONLY = True \"\"\" Controls", "of the session cookie. Defaults to ``'session'``. \"\"\" SESSION_COOKIE_DOMAIN =", "= None \"\"\" The path for the session cookie. If", "the ``httponly`` flag. Browsers will not allow JavaScript access to", ":class:`~flask_unchained.bundles.session.session_interfaces.MongoDBSessionInterface` - ``'sqlalchemy'``: :class:`~flask_unchained.bundles.session.session_interfaces.SqlAlchemySessionInterface` Defaults to ``'null'``. \"\"\" SESSION_PERMANENT =", "to ``127.0.0.1:27017``. \"\"\" SESSION_MONGODB_DB = 'flask_session' \"\"\" The MongoDB database", "SESSION_MEMCACHED = None \"\"\" A :class:`memcached.Client` instance. By default, connect", "window closes. Defaults to ``True``. \"\"\" class Config(_DefaultFlaskConfigForSessions): \"\"\" Default", "all session keys. 
This makes it possible to use the", "Should be specified as an octal, eg ``0o600``. Defaults to", "Defaults to ``None``. \"\"\" SESSION_COOKIE_PATH = None \"\"\" The path", "for the session cookie. If this is not set, the", "days. \"\"\" SESSION_COOKIE_SAMESITE = None \"\"\" Restrict how cookies are", "same backend storage server for different apps. Defaults to ``'session:'``.", "extension instance. \"\"\" SESSION_SQLALCHEMY_TABLE = 'flask_sessions' \"\"\" The name of", "= db \"\"\" A :class:`~flask_unchained.bundles.sqlalchemy.SQLAlchemy` extension instance. \"\"\" SESSION_SQLALCHEMY_TABLE =", "this and will always expire if the browser window closes.", "= os.path.join(os.getcwd(), 'flask_sessions') \"\"\" The folder where session files are", "cookie sid or not. If set to ``True``, you have", "to ``'session:'``. \"\"\" SESSION_REDIS = None \"\"\" A :class:`redis.Redis` instance.", "instance. By default, connect to ``127.0.0.1:6379``. \"\"\" SESSION_MEMCACHED = None", "\"\"\" The domain for the session cookie. If this is", "to requests if those requests are \"same-site\". Can be set", "directory. \"\"\" SESSION_FILE_THRESHOLD = 500 \"\"\" The maximum number of", "file mode wanted for the session files. Should be specified", "set ``SECRET_KEY``. Defaults to ``False``. \"\"\" SESSION_KEY_PREFIX = 'session:' \"\"\"", "cookie. If this is not set, the cookie will be", "``127.0.0.1:11211``. \"\"\" SESSION_FILE_DIR = os.path.join(os.getcwd(), 'flask_sessions') \"\"\" The folder where", "import os from datetime import timedelta from flask_unchained import BundleConfig", "cookie will be valid for all subdomains of ``SERVER_NAME``. Defaults", "session or not. Defaults to ``True``. \"\"\" SESSION_USE_SIGNER = False", "cookie should be set with the ``secure`` flag. Browsers will", "SQL table you want to use. Defaults to ``flask_sessions``. \"\"\"", "Browsers will not allow JavaScript access to cookies marked as", "class Config(_DefaultFlaskConfigForSessions): \"\"\" Default configuration options for the Session Bundle.", "that it will only be attached to requests if those", "want to use. Defaults to ``'flask_session'``. \"\"\" SESSION_MONGODB_COLLECT = 'sessions'", "stored. Defaults to using a folder named ``flask_sessions`` in your", "the Session Bundle. \"\"\" SESSION_TYPE = 'null' \"\"\" Specifies which", "= 'null' \"\"\" Specifies which type of session interface to", "Whether use permanent session or not. Defaults to ``True``. \"\"\"", "Defaults to ``0o600``. \"\"\" SESSION_MONGODB = None \"\"\" A :class:`pymongo.MongoClient`", "connect to ``127.0.0.1:6379``. \"\"\" SESSION_MEMCACHED = None \"\"\" A :class:`memcached.Client`", "set to ``True`` a permanent session will be refreshed each", "cookies marked as ``httponly`` for security. Defaults to ``True``. \"\"\"", ":class:`pymongo.MongoClient` instance. By default, connect to ``127.0.0.1:27017``. \"\"\" SESSION_MONGODB_DB =", "31 days. \"\"\" SESSION_COOKIE_SAMESITE = None \"\"\" Restrict how cookies", "if you need to customize the :class:`~flask_unchained.bundles.sqlalchemy.BaseModel` subclass used for", "``True``. \"\"\" class Config(_DefaultFlaskConfigForSessions): \"\"\" Default configuration options for the", "SESSION_FILE_THRESHOLD = 500 \"\"\" The maximum number of items the", "not affected by this and will always expire if the", "``True``. \"\"\" SESSION_USE_SIGNER = False \"\"\" Whether sign the session", "want to use. Defaults to ``flask_sessions``. \"\"\" SESSION_SQLALCHEMY_MODEL = None", "``None``. 
\"\"\" SESSION_COOKIE_HTTPONLY = True \"\"\" Controls if the cookie", "browser window closes. Defaults to ``True``. \"\"\" class Config(_DefaultFlaskConfigForSessions): \"\"\"", ":class:`~flask_unchained.bundles.session.session_interfaces.RedisSessionInterface` - ``'memcached'``: :class:`~flask_unchained.bundles.session.session_interfaces.MemcachedSessionInterface` - ``'filesystem'``: :class:`~flask_unchained.bundles.session.session_interfaces.FileSystemSessionInterface` - ``'mongodb'``: :class:`~flask_unchained.bundles.session.session_interfaces.MongoDBSessionInterface`", "domain for the session cookie. If this is not set,", "valid for all subdomains of ``SERVER_NAME``. Defaults to ``None``. \"\"\"", "served over HTTPS for this to make sense. Defaults to", "use the same backend storage server for different apps. Defaults", "- ``'memcached'``: :class:`~flask_unchained.bundles.session.session_interfaces.MemcachedSessionInterface` - ``'filesystem'``: :class:`~flask_unchained.bundles.session.session_interfaces.FileSystemSessionInterface` - ``'mongodb'``: :class:`~flask_unchained.bundles.session.session_interfaces.MongoDBSessionInterface` -", "you want to use. Defaults to ``'sessions'``. \"\"\" SESSION_SQLALCHEMY =", "this is not set, the cookie will be valid for", "keys. This makes it possible to use the same backend", "``'redis'``: :class:`~flask_unchained.bundles.session.session_interfaces.RedisSessionInterface` - ``'memcached'``: :class:`~flask_unchained.bundles.session.session_interfaces.MemcachedSessionInterface` - ``'filesystem'``: :class:`~flask_unchained.bundles.session.session_interfaces.FileSystemSessionInterface` - ``'mongodb'``:", "from flask_unchained import BundleConfig try: from flask_unchained.bundles.sqlalchemy import db except", "SESSION_COOKIE_HTTPONLY = True \"\"\" Controls if the cookie should be", ":class:`~flask_unchained.bundles.session.session_interfaces.SqlAlchemySessionInterface` Defaults to ``'null'``. \"\"\" SESSION_PERMANENT = True \"\"\" Whether", "refreshed each request and get their lifetime extended, if set", "will be refreshed each request and get their lifetime extended,", "sites. Limits the scope of the cookie such that it", "True \"\"\" Whether use permanent session or not. Defaults to", "to ``None``. \"\"\" SESSION_COOKIE_HTTPONLY = True \"\"\" Controls if the", "to 500. \"\"\" SESSION_FILE_MODE = 0o600 \"\"\" The file mode", "The application must be served over HTTPS for this to", "permanent sessions are not affected by this and will always", "to ``127.0.0.1:6379``. \"\"\" SESSION_MEMCACHED = None \"\"\" A :class:`memcached.Client` instance.", "over HTTPS if the cookie is marked ``secure``. The application", "'flask_sessions' \"\"\" The name of the SQL table you want", "cookie will be valid for all of ``APPLICATION_ROOT`` or if", "of the cookie such that it will only be attached", "are sent with requests from external sites. Limits the scope", "wanted for the session files. Should be specified as an", "``True`` a permanent session will be refreshed each request and", "from external sites. Limits the scope of the cookie such", "this if you need to customize the :class:`~flask_unchained.bundles.sqlalchemy.BaseModel` subclass used", "the scope of the cookie such that it will only", "for the Session Bundle. \"\"\" SESSION_TYPE = 'null' \"\"\" Specifies", ":class:`~flask_unchained.bundles.sqlalchemy.SQLAlchemy` extension instance. 
\"\"\" SESSION_SQLALCHEMY_TABLE = 'flask_sessions' \"\"\" The name", "it possible to use the same backend storage server for", "named ``flask_sessions`` in your current working directory. \"\"\" SESSION_FILE_THRESHOLD =", "BundleConfig try: from flask_unchained.bundles.sqlalchemy import db except ImportError: db =", "to ``True``. \"\"\" SESSION_COOKIE_SECURE = False \"\"\" Controls if the", "Non permanent sessions are not affected by this and will", "if set to ``False`` it will only be modified if", "class _DefaultFlaskConfigForSessions(BundleConfig): SESSION_COOKIE_NAME = 'session' \"\"\" The name of the", "Specifies which type of session interface to use. Built-in session", "Defaults to 500. \"\"\" SESSION_FILE_MODE = 0o600 \"\"\" The file", "a folder named ``flask_sessions`` in your current working directory. \"\"\"", "SESSION_KEY_PREFIX = 'session:' \"\"\" A prefix that is added before", "The domain for the session cookie. If this is not", "session cookie sid or not. If set to ``True``, you", "that is not set for '/'. Defaults to ``None``. \"\"\"", ":class:`memcached.Client` instance. By default, connect to ``127.0.0.1:11211``. \"\"\" SESSION_FILE_DIR =", "A :class:`pymongo.MongoClient` instance. By default, connect to ``127.0.0.1:27017``. \"\"\" SESSION_MONGODB_DB", "None \"\"\" The domain for the session cookie. If this", "each request and get their lifetime extended, if set to", "\"\"\" A :class:`redis.Redis` instance. By default, connect to ``127.0.0.1:6379``. \"\"\"", "session stores before it starts deleting some. Defaults to 500.", "requests from external sites. Limits the scope of the cookie", "``'session:'``. \"\"\" SESSION_REDIS = None \"\"\" A :class:`redis.Redis` instance. By", "the cookie such that it will only be attached to", "for security. Defaults to ``True``. \"\"\" SESSION_COOKIE_SECURE = False \"\"\"", "default, connect to ``127.0.0.1:27017``. \"\"\" SESSION_MONGODB_DB = 'flask_session' \"\"\" The", "that is added before all session keys. This makes it", "\"\"\" The lifetime of a permanent session as ``datetime.timedelta`` object", "which type of session interface to use. Built-in session types:", "= True \"\"\" Controls the set-cookie behavior. If set to", "Config(_DefaultFlaskConfigForSessions): \"\"\" Default configuration options for the Session Bundle. \"\"\"", "SESSION_COOKIE_SECURE = False \"\"\" Controls if the cookie should be", "= None \"\"\" Restrict how cookies are sent with requests", "``'mongodb'``: :class:`~flask_unchained.bundles.session.session_interfaces.MongoDBSessionInterface` - ``'sqlalchemy'``: :class:`~flask_unchained.bundles.session.session_interfaces.SqlAlchemySessionInterface` Defaults to ``'null'``. \"\"\" SESSION_PERMANENT", "to ``0o600``. \"\"\" SESSION_MONGODB = None \"\"\" A :class:`pymongo.MongoClient` instance.", "cookie is marked ``secure``. The application must be served over", "permanent session or not. Defaults to ``True``. \"\"\" SESSION_USE_SIGNER =", "current working directory. \"\"\" SESSION_FILE_THRESHOLD = 500 \"\"\" The maximum", "such that it will only be attached to requests if", "be set with the ``secure`` flag. Browsers will only send", "the cookie will be valid for all of ``APPLICATION_ROOT`` or", "is not set for '/'. Defaults to ``None``. 
\"\"\" SESSION_COOKIE_HTTPONLY", "= True \"\"\" Controls if the cookie should be set", ":class:`~flask_unchained.bundles.session.session_interfaces.MemcachedSessionInterface` - ``'filesystem'``: :class:`~flask_unchained.bundles.session.session_interfaces.FileSystemSessionInterface` - ``'mongodb'``: :class:`~flask_unchained.bundles.session.session_interfaces.MongoDBSessionInterface` - ``'sqlalchemy'``: :class:`~flask_unchained.bundles.session.session_interfaces.SqlAlchemySessionInterface`", "= None \"\"\" A :class:`redis.Redis` instance. By default, connect to", "A prefix that is added before all session keys. This", "``SECRET_KEY``. Defaults to ``False``. \"\"\" SESSION_KEY_PREFIX = 'session:' \"\"\" A", "\"\"\" The MongoDB collection you want to use. Defaults to", "with the ``secure`` flag. Browsers will only send cookies with", "None \"\"\" A :class:`redis.Redis` instance. By default, connect to ``127.0.0.1:6379``.", "an octal, eg ``0o600``. Defaults to ``0o600``. \"\"\" SESSION_MONGODB =", "object or an integer representing seconds. Defaults to 31 days.", "for all of ``APPLICATION_ROOT`` or if that is not set", "None \"\"\" A :class:`memcached.Client` instance. By default, connect to ``127.0.0.1:11211``.", "an integer representing seconds. Defaults to 31 days. \"\"\" SESSION_COOKIE_SAMESITE", "representing seconds. Defaults to 31 days. \"\"\" SESSION_COOKIE_SAMESITE = None", "to ``'sessions'``. \"\"\" SESSION_SQLALCHEMY = db \"\"\" A :class:`~flask_unchained.bundles.sqlalchemy.SQLAlchemy` extension", "``127.0.0.1:6379``. \"\"\" SESSION_MEMCACHED = None \"\"\" A :class:`memcached.Client` instance. By", "\"\"\" The path for the session cookie. If this is", "HTTPS if the cookie is marked ``secure``. The application must", "if the cookie should be set with the ``secure`` flag.", "SESSION_FILE_MODE = 0o600 \"\"\" The file mode wanted for the", "\"\"\" SESSION_COOKIE_SECURE = False \"\"\" Controls if the cookie should", "seconds. Defaults to 31 days. \"\"\" SESSION_COOKIE_SAMESITE = None \"\"\"", "SESSION_REDIS = None \"\"\" A :class:`redis.Redis` instance. By default, connect", "= 500 \"\"\" The maximum number of items the session", "Defaults to ``False``. \"\"\" PERMANENT_SESSION_LIFETIME = timedelta(days=31) \"\"\" The lifetime", "files. Should be specified as an octal, eg ``0o600``. Defaults", "datetime import timedelta from flask_unchained import BundleConfig try: from flask_unchained.bundles.sqlalchemy", "import timedelta from flask_unchained import BundleConfig try: from flask_unchained.bundles.sqlalchemy import", "get their lifetime extended, if set to ``False`` it will", "timedelta from flask_unchained import BundleConfig try: from flask_unchained.bundles.sqlalchemy import db", "SESSION_FILE_DIR = os.path.join(os.getcwd(), 'flask_sessions') \"\"\" The folder where session files", "default, connect to ``127.0.0.1:11211``. \"\"\" SESSION_FILE_DIR = os.path.join(os.getcwd(), 'flask_sessions') \"\"\"", "to make sense. Defaults to ``False``. \"\"\" PERMANENT_SESSION_LIFETIME = timedelta(days=31)", "to ``'Lax'`` (recommended) or ``'Strict'``. Defaults to ``None``. \"\"\" SESSION_REFRESH_EACH_REQUEST", "will not allow JavaScript access to cookies marked as ``httponly``", "different apps. Defaults to ``'session:'``. \"\"\" SESSION_REDIS = None \"\"\"", "it starts deleting some. Defaults to 500. \"\"\" SESSION_FILE_MODE =", "some. Defaults to 500. \"\"\" SESSION_FILE_MODE = 0o600 \"\"\" The", "flag. 
Browsers will not allow JavaScript access to cookies marked", "Defaults to using a folder named ``flask_sessions`` in your current", "from datetime import timedelta from flask_unchained import BundleConfig try: from", "\"\"\" SESSION_SQLALCHEMY = db \"\"\" A :class:`~flask_unchained.bundles.sqlalchemy.SQLAlchemy` extension instance. \"\"\"", "\"\"\" SESSION_USE_SIGNER = False \"\"\" Whether sign the session cookie", "name of the session cookie. Defaults to ``'session'``. \"\"\" SESSION_COOKIE_DOMAIN", "to use. Built-in session types: - ``'null'``: :class:`~flask_unchained.bundles.session.session_interfaces.NullSessionInterface` (default) -", "a permanent session as ``datetime.timedelta`` object or an integer representing", "``False``. \"\"\" PERMANENT_SESSION_LIFETIME = timedelta(days=31) \"\"\" The lifetime of a", "lifetime of a permanent session as ``datetime.timedelta`` object or an", "or not. If set to ``True``, you have to set", "access to cookies marked as ``httponly`` for security. Defaults to", "``flask_sessions`` in your current working directory. \"\"\" SESSION_FILE_THRESHOLD = 500", "db = None class _DefaultFlaskConfigForSessions(BundleConfig): SESSION_COOKIE_NAME = 'session' \"\"\" The", "to ``True``. \"\"\" class Config(_DefaultFlaskConfigForSessions): \"\"\" Default configuration options for", "of session interface to use. Built-in session types: - ``'null'``:", "None \"\"\" Restrict how cookies are sent with requests from", "for the session files. Should be specified as an octal,", "set to ``True``, you have to set ``SECRET_KEY``. Defaults to", "True \"\"\" Controls the set-cookie behavior. If set to ``True``", "db except ImportError: db = None class _DefaultFlaskConfigForSessions(BundleConfig): SESSION_COOKIE_NAME =", "or not. Defaults to ``True``. \"\"\" SESSION_USE_SIGNER = False \"\"\"", "``APPLICATION_ROOT`` or if that is not set for '/'. Defaults", "instance. By default, connect to ``127.0.0.1:27017``. \"\"\" SESSION_MONGODB_DB = 'flask_session'", "will only be attached to requests if those requests are", "you want to use. Defaults to ``'flask_session'``. \"\"\" SESSION_MONGODB_COLLECT =", "how cookies are sent with requests from external sites. Limits", "for this to make sense. Defaults to ``False``. \"\"\" PERMANENT_SESSION_LIFETIME", "request and get their lifetime extended, if set to ``False``", "prefix that is added before all session keys. This makes", "subdomains of ``SERVER_NAME``. Defaults to ``None``. \"\"\" SESSION_COOKIE_PATH = None", "of ``SERVER_NAME``. Defaults to ``None``. \"\"\" SESSION_COOKIE_PATH = None \"\"\"", "interface to use. Built-in session types: - ``'null'``: :class:`~flask_unchained.bundles.session.session_interfaces.NullSessionInterface` (default)", "with requests over HTTPS if the cookie is marked ``secure``.", "session files are stored. Defaults to using a folder named", "to ``False``. \"\"\" PERMANENT_SESSION_LIFETIME = timedelta(days=31) \"\"\" The lifetime of", "database you want to use. Defaults to ``'flask_session'``. \"\"\" SESSION_MONGODB_COLLECT", "to ``'session'``. \"\"\" SESSION_COOKIE_DOMAIN = None \"\"\" The domain for", "if the cookie is marked ``secure``. The application must be", "it will only be attached to requests if those requests", "want to use. Defaults to ``'sessions'``. \"\"\" SESSION_SQLALCHEMY = db", "to ``'null'``. 
\"\"\" SESSION_PERMANENT = True \"\"\" Whether use permanent", "not set the cookie will be valid for all of", "ImportError: db = None class _DefaultFlaskConfigForSessions(BundleConfig): SESSION_COOKIE_NAME = 'session' \"\"\"", "to ``False`` it will only be modified if the session", "SESSION_COOKIE_DOMAIN = None \"\"\" The domain for the session cookie.", "False \"\"\" Controls if the cookie should be set with", "is added before all session keys. This makes it possible", "files are stored. Defaults to using a folder named ``flask_sessions``", "as an octal, eg ``0o600``. Defaults to ``0o600``. \"\"\" SESSION_MONGODB", "for all subdomains of ``SERVER_NAME``. Defaults to ``None``. \"\"\" SESSION_COOKIE_PATH", "extended, if set to ``False`` it will only be modified", "SESSION_TYPE = 'null' \"\"\" Specifies which type of session interface", "requests are \"same-site\". Can be set to ``'Lax'`` (recommended) or", "are \"same-site\". Can be set to ``'Lax'`` (recommended) or ``'Strict'``.", "(default) - ``'redis'``: :class:`~flask_unchained.bundles.session.session_interfaces.RedisSessionInterface` - ``'memcached'``: :class:`~flask_unchained.bundles.session.session_interfaces.MemcachedSessionInterface` - ``'filesystem'``: :class:`~flask_unchained.bundles.session.session_interfaces.FileSystemSessionInterface`", "\"\"\" Whether use permanent session or not. Defaults to ``True``.", "the session cookie. If this is not set, the cookie", "connect to ``127.0.0.1:27017``. \"\"\" SESSION_MONGODB_DB = 'flask_session' \"\"\" The MongoDB", "``httponly`` flag. Browsers will not allow JavaScript access to cookies", "``'sessions'``. \"\"\" SESSION_SQLALCHEMY = db \"\"\" A :class:`~flask_unchained.bundles.sqlalchemy.SQLAlchemy` extension instance.", "= None \"\"\" The domain for the session cookie. If", "flag. Browsers will only send cookies with requests over HTTPS", "use. Built-in session types: - ``'null'``: :class:`~flask_unchained.bundles.session.session_interfaces.NullSessionInterface` (default) - ``'redis'``:", ":class:`redis.Redis` instance. By default, connect to ``127.0.0.1:6379``. \"\"\" SESSION_MEMCACHED =", "Defaults to 31 days. \"\"\" SESSION_COOKIE_SAMESITE = None \"\"\" Restrict", "\"\"\" SESSION_COOKIE_PATH = None \"\"\" The path for the session", "The lifetime of a permanent session as ``datetime.timedelta`` object or", "set the cookie will be valid for all of ``APPLICATION_ROOT``", "if the session actually modifies. Non permanent sessions are not", "= None class _DefaultFlaskConfigForSessions(BundleConfig): SESSION_COOKIE_NAME = 'session' \"\"\" The name", "to ``None``. \"\"\" SESSION_REFRESH_EACH_REQUEST = True \"\"\" Controls the set-cookie", "makes it possible to use the same backend storage server", "by this and will always expire if the browser window", ":class:`~flask_unchained.bundles.session.session_interfaces.FileSystemSessionInterface` - ``'mongodb'``: :class:`~flask_unchained.bundles.session.session_interfaces.MongoDBSessionInterface` - ``'sqlalchemy'``: :class:`~flask_unchained.bundles.session.session_interfaces.SqlAlchemySessionInterface` Defaults to ``'null'``.", "import BundleConfig try: from flask_unchained.bundles.sqlalchemy import db except ImportError: db", "None class _DefaultFlaskConfigForSessions(BundleConfig): SESSION_COOKIE_NAME = 'session' \"\"\" The name of", "is not set, the cookie will be valid for all", "need to customize the :class:`~flask_unchained.bundles.sqlalchemy.BaseModel` subclass used for storing sessions", "in your current working directory. 
\"\"\" SESSION_FILE_THRESHOLD = 500 \"\"\"", "with requests from external sites. Limits the scope of the", "Defaults to ``'session:'``. \"\"\" SESSION_REDIS = None \"\"\" A :class:`redis.Redis`", "allow JavaScript access to cookies marked as ``httponly`` for security.", "for '/'. Defaults to ``None``. \"\"\" SESSION_COOKIE_HTTPONLY = True \"\"\"", "\"\"\" SESSION_REFRESH_EACH_REQUEST = True \"\"\" Controls the set-cookie behavior. If", "SESSION_SQLALCHEMY_MODEL = None \"\"\" Set this if you need to", "possible to use the same backend storage server for different", "\"\"\" A :class:`memcached.Client` instance. By default, connect to ``127.0.0.1:11211``. \"\"\"", "SESSION_COOKIE_SAMESITE = None \"\"\" Restrict how cookies are sent with", "path for the session cookie. If this is not set", "'session' \"\"\" The name of the session cookie. Defaults to", "\"\"\" The maximum number of items the session stores before", "the cookie will be valid for all subdomains of ``SERVER_NAME``.", "Controls if the cookie should be set with the ``secure``", "and get their lifetime extended, if set to ``False`` it", "set with the ``secure`` flag. Browsers will only send cookies", "MongoDB database you want to use. Defaults to ``'flask_session'``. \"\"\"", "os from datetime import timedelta from flask_unchained import BundleConfig try:", "sense. Defaults to ``False``. \"\"\" PERMANENT_SESSION_LIFETIME = timedelta(days=31) \"\"\" The", "to customize the :class:`~flask_unchained.bundles.sqlalchemy.BaseModel` subclass used for storing sessions in", "``'sqlalchemy'``: :class:`~flask_unchained.bundles.session.session_interfaces.SqlAlchemySessionInterface` Defaults to ``'null'``. \"\"\" SESSION_PERMANENT = True \"\"\"", "\"\"\" SESSION_MEMCACHED = None \"\"\" A :class:`memcached.Client` instance. By default,", "The MongoDB collection you want to use. Defaults to ``'sessions'``.", "or if that is not set for '/'. Defaults to", "False \"\"\" Whether sign the session cookie sid or not.", "``None``. \"\"\" SESSION_REFRESH_EACH_REQUEST = True \"\"\" Controls the set-cookie behavior.", "of the SQL table you want to use. Defaults to", "apps. Defaults to ``'session:'``. \"\"\" SESSION_REDIS = None \"\"\" A", "be served over HTTPS for this to make sense. Defaults", "not allow JavaScript access to cookies marked as ``httponly`` for", "\"\"\" Default configuration options for the Session Bundle. \"\"\" SESSION_TYPE", "\"\"\" A prefix that is added before all session keys.", "Restrict how cookies are sent with requests from external sites.", "= True \"\"\" Whether use permanent session or not. Defaults", "session files. Should be specified as an octal, eg ``0o600``.", "Defaults to ``'flask_session'``. \"\"\" SESSION_MONGODB_COLLECT = 'sessions' \"\"\" The MongoDB", "Bundle. \"\"\" SESSION_TYPE = 'null' \"\"\" Specifies which type of", "to ``False``. \"\"\" SESSION_KEY_PREFIX = 'session:' \"\"\" A prefix that", "\"\"\" Controls if the cookie should be set with the", "to set ``SECRET_KEY``. Defaults to ``False``. \"\"\" SESSION_KEY_PREFIX = 'session:'", "500. \"\"\" SESSION_FILE_MODE = 0o600 \"\"\" The file mode wanted", "the SQL table you want to use. Defaults to ``flask_sessions``.", "SESSION_COOKIE_PATH = None \"\"\" The path for the session cookie.", "sessions are not affected by this and will always expire", "``secure``. The application must be served over HTTPS for this", "= 'flask_sessions' \"\"\" The name of the SQL table you", "or ``'Strict'``. Defaults to ``None``. 
\"\"\" SESSION_REFRESH_EACH_REQUEST = True \"\"\"", "'flask_sessions') \"\"\" The folder where session files are stored. Defaults", "If this is not set the cookie will be valid", "- ``'filesystem'``: :class:`~flask_unchained.bundles.session.session_interfaces.FileSystemSessionInterface` - ``'mongodb'``: :class:`~flask_unchained.bundles.session.session_interfaces.MongoDBSessionInterface` - ``'sqlalchemy'``: :class:`~flask_unchained.bundles.session.session_interfaces.SqlAlchemySessionInterface` Defaults", "flask_unchained import BundleConfig try: from flask_unchained.bundles.sqlalchemy import db except ImportError:", "\"\"\" Restrict how cookies are sent with requests from external", "all subdomains of ``SERVER_NAME``. Defaults to ``None``. \"\"\" SESSION_COOKIE_PATH =", "= None \"\"\" A :class:`pymongo.MongoClient` instance. By default, connect to", "folder where session files are stored. Defaults to using a", "500 \"\"\" The maximum number of items the session stores", "use. Defaults to ``flask_sessions``. \"\"\" SESSION_SQLALCHEMY_MODEL = None \"\"\" Set", "added before all session keys. This makes it possible to", "the cookie should be set with the ``httponly`` flag. Browsers", "use permanent session or not. Defaults to ``True``. \"\"\" SESSION_USE_SIGNER", "specified as an octal, eg ``0o600``. Defaults to ``0o600``. \"\"\"", "'null' \"\"\" Specifies which type of session interface to use.", "before all session keys. This makes it possible to use", "set to ``False`` it will only be modified if the", "Defaults to ``'session'``. \"\"\" SESSION_COOKIE_DOMAIN = None \"\"\" The domain", "marked ``secure``. The application must be served over HTTPS for", "\"\"\" SESSION_MONGODB_DB = 'flask_session' \"\"\" The MongoDB database you want", "as ``datetime.timedelta`` object or an integer representing seconds. Defaults to", "Set this if you need to customize the :class:`~flask_unchained.bundles.sqlalchemy.BaseModel` subclass", "``False``. \"\"\" SESSION_KEY_PREFIX = 'session:' \"\"\" A prefix that is", "SESSION_MONGODB_COLLECT = 'sessions' \"\"\" The MongoDB collection you want to", "if those requests are \"same-site\". Can be set to ``'Lax'``", "for different apps. Defaults to ``'session:'``. \"\"\" SESSION_REDIS = None", "to ``127.0.0.1:11211``. \"\"\" SESSION_FILE_DIR = os.path.join(os.getcwd(), 'flask_sessions') \"\"\" The folder", "Session Bundle. \"\"\" SESSION_TYPE = 'null' \"\"\" Specifies which type", "SESSION_SQLALCHEMY = db \"\"\" A :class:`~flask_unchained.bundles.sqlalchemy.SQLAlchemy` extension instance. \"\"\" SESSION_SQLALCHEMY_TABLE", "cookie. Defaults to ``'session'``. \"\"\" SESSION_COOKIE_DOMAIN = None \"\"\" The", "<filename>flask_unchained/bundles/session/config.py import os from datetime import timedelta from flask_unchained import", "The path for the session cookie. If this is not", "0o600 \"\"\" The file mode wanted for the session files.", "modified if the session actually modifies. Non permanent sessions are", "you need to customize the :class:`~flask_unchained.bundles.sqlalchemy.BaseModel` subclass used for storing", "the session cookie sid or not. If set to ``True``,", "modifies. Non permanent sessions are not affected by this and", "to use the same backend storage server for different apps.", "\"\"\" SESSION_SQLALCHEMY_TABLE = 'flask_sessions' \"\"\" The name of the SQL", "of items the session stores before it starts deleting some.", "'session:' \"\"\" A prefix that is added before all session", "The file mode wanted for the session files. 
Should be", "\"\"\" The folder where session files are stored. Defaults to", "scope of the cookie such that it will only be", "not set for '/'. Defaults to ``None``. \"\"\" SESSION_COOKIE_HTTPONLY =", "cookies with requests over HTTPS if the cookie is marked", "A :class:`redis.Redis` instance. By default, connect to ``127.0.0.1:6379``. \"\"\" SESSION_MEMCACHED", "cookie. If this is not set the cookie will be", "- ``'null'``: :class:`~flask_unchained.bundles.session.session_interfaces.NullSessionInterface` (default) - ``'redis'``: :class:`~flask_unchained.bundles.session.session_interfaces.RedisSessionInterface` - ``'memcached'``: :class:`~flask_unchained.bundles.session.session_interfaces.MemcachedSessionInterface`", "\"\"\" SESSION_TYPE = 'null' \"\"\" Specifies which type of session", "Limits the scope of the cookie such that it will", "\"\"\" Whether sign the session cookie sid or not. If", "default, connect to ``127.0.0.1:6379``. \"\"\" SESSION_MEMCACHED = None \"\"\" A", "``datetime.timedelta`` object or an integer representing seconds. Defaults to 31", "the set-cookie behavior. If set to ``True`` a permanent session", "be specified as an octal, eg ``0o600``. Defaults to ``0o600``.", "to ``True`` a permanent session will be refreshed each request", "Whether sign the session cookie sid or not. If set", "configuration options for the Session Bundle. \"\"\" SESSION_TYPE = 'null'", "items the session stores before it starts deleting some. Defaults", "should be set with the ``secure`` flag. Browsers will only", "Defaults to ``True``. \"\"\" class Config(_DefaultFlaskConfigForSessions): \"\"\" Default configuration options", "``'memcached'``: :class:`~flask_unchained.bundles.session.session_interfaces.MemcachedSessionInterface` - ``'filesystem'``: :class:`~flask_unchained.bundles.session.session_interfaces.FileSystemSessionInterface` - ``'mongodb'``: :class:`~flask_unchained.bundles.session.session_interfaces.MongoDBSessionInterface` - ``'sqlalchemy'``:", "marked as ``httponly`` for security. Defaults to ``True``. \"\"\" SESSION_COOKIE_SECURE", "octal, eg ``0o600``. Defaults to ``0o600``. \"\"\" SESSION_MONGODB = None", "storage server for different apps. Defaults to ``'session:'``. \"\"\" SESSION_REDIS", "``None``. \"\"\" SESSION_COOKIE_PATH = None \"\"\" The path for the", "``secure`` flag. Browsers will only send cookies with requests over", "session cookie. If this is not set the cookie will" ]
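To make the defaults above concrete, here is a minimal sketch of overriding a few of these settings, assuming flask-unchained's usual pattern of shadowing bundle config attributes in the app's own Config class; the backend choice, the `myapp:` key prefix, and the shorter lifetime are illustrative values, not anything prescribed by this file.

# Hypothetical app-level config overriding the Session Bundle defaults
# (assumes a standard flask-unchained project; values are illustrative).
from datetime import timedelta

from flask_unchained import BundleConfig


class Config(BundleConfig):
    SESSION_TYPE = 'redis'                          # use the Redis session interface
    SESSION_KEY_PREFIX = 'myapp:'                   # hypothetical per-app key namespace
    SESSION_COOKIE_SECURE = True                    # requires serving the app over HTTPS
    PERMANENT_SESSION_LIFETIME = timedelta(days=7)  # shorter than the 31-day default

With SESSION_TYPE set to 'redis' and SESSION_REDIS left as None, the interface would connect to the default ``127.0.0.1:6379`` noted above.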
[ "start to end of forecasting # horizon, but only return", "numpy as np import pandas as pd from sktime.forecasting.base._base import", "end) # statsmodels forecasts all periods from start to end", "optional (default=None) The forecasters horizon with the steps ahead to", "coding: utf-8 -*- __author__ = [\"<NAME>\"] __all__ = [\"_StatsModelsAdapter\"] import", "-*- __author__ = [\"<NAME>\"] __all__ = [\"_StatsModelsAdapter\"] import numpy as", "only return given time points in forecasting horizon return y_pred.loc[fh.to_absolute(self.cutoff).to_pandas()]", "self.cutoff)[[0, -1]] y_pred = self._fitted_forecaster.predict(start, end) # statsmodels forecasts all", "Returns ------- fitted_params : dict \"\"\" self.check_is_fitted() return { name:", "the steps ahead to to predict. X : pd.DataFrame, optional", "== pd.Int64Index: y, X = _coerce_int_to_range_index(y, X) self._set_y_X(y, X) self._set_fh(fh)", "# so we coerce them here to pd.RangeIndex if isinstance(y,", "y, X=None, fh=None): \"\"\"Fit to training data. Parameters ---------- y", "of the training series when passing integers start, end =", "X : pd.DataFrame, optional (default=None) Exogenous variables are ignored Returns", "to training data. Parameters ---------- y : pd.Series Target time", "predict. Default is one-step ahead forecast, i.e. np.array([1]) X :", ": int or list, optional (default=0.95) Returns ------- y_pred :", "fit the forecaster. fh : int, list or np.array, optional", "= fh.to_absolute_int(self._y.index[0], self.cutoff)[[0, -1]] y_pred = self._fitted_forecaster.predict(start, end) # statsmodels", "but only return given time points in forecasting horizon return", "forecaster. fh : int, list or np.array, optional (default=None) The", "and type(y.index) == pd.Int64Index: y, X = _coerce_int_to_range_index(y, X) self._set_y_X(y,", "return_pred_int: raise NotImplementedError() # statsmodels requires zero-based indexing starting at", "Parameters ---------- fh : ForecastingHorizon The forecasters horizon with the", "X_train=None): \"\"\"Internal fit\"\"\" raise NotImplementedError(\"abstract method\") def _predict(self, fh, X=None,", "\"\"\"Fit to training data. Parameters ---------- y : pd.Series Target", "y.index = new_index if X is not None: X.index =", "which to fit the forecaster. fh : int, list or", ": pd.Series Returns series of predicted values. \"\"\" if return_pred_int:", "fitted parameters\"\"\" return self._fitted_param_names def _coerce_int_to_range_index(y, X=None): new_index = pd.RangeIndex(y.index[0],", "The forecasters horizon with the steps ahead to to predict.", "list or np.array, optional (default=None) The forecasters horizon with the", "1) try: np.testing.assert_array_equal(y.index, new_index) except AssertionError: raise ValueError( \"Coercion of", "if X is not None: X.index = new_index return y,", "training series when passing integers start, end = fh.to_absolute_int(self._y.index[0], self.cutoff)[[0,", "when passing integers start, end = fh.to_absolute_int(self._y.index[0], self.cutoff)[[0, -1]] y_pred", "parameters Returns ------- fitted_params : dict \"\"\" self.check_is_fitted() return {", "ahead to to predict. 
X : pd.DataFrame, optional (default=None) Exogenous", "start, end = fh.to_absolute_int(self._y.index[0], self.cutoff)[[0, -1]] y_pred = self._fitted_forecaster.predict(start, end)", "if return_pred_int: raise NotImplementedError() # statsmodels requires zero-based indexing starting", "__all__ = [\"_StatsModelsAdapter\"] import numpy as np import pandas as", "coerce them here to pd.RangeIndex if isinstance(y, pd.Series) and type(y.index)", "utf-8 -*- __author__ = [\"<NAME>\"] __all__ = [\"_StatsModelsAdapter\"] import numpy", "to end of forecasting # horizon, but only return given", "(default=None) The forecasters horizon with the steps ahead to to", "for name in self._get_fitted_param_names() } def _get_fitted_param_names(self): \"\"\"Get names of", "so we coerce them here to pd.RangeIndex if isinstance(y, pd.Series)", "variables are ignored Returns ------- self : returns an instance", "horizon, but only return given time points in forecasting horizon", "given time points in forecasting horizon return y_pred.loc[fh.to_absolute(self.cutoff).to_pandas()] def get_fitted_params(self):", "\"\"\"Get fitted parameters Returns ------- fitted_params : dict \"\"\" self.check_is_fitted()", "_coerce_int_to_range_index(y, X=None): new_index = pd.RangeIndex(y.index[0], y.index[-1] + 1) try: np.testing.assert_array_equal(y.index,", "---------- y : pd.Series Target time series to which to", "i.e. np.array([1]) X : pd.DataFrame, optional (default=None) Exogenous variables are", "pd.Series Returns series of predicted values. \"\"\" if return_pred_int: raise", "np.array, optional (default=None) The forecasters horizon with the steps ahead", "as required, # so we coerce them here to pd.RangeIndex", "alpha : int or list, optional (default=0.95) Returns ------- y_pred", "forecasts. Parameters ---------- fh : ForecastingHorizon The forecasters horizon with", "beginning of the training series when passing integers start, end", "new_index if X is not None: X.index = new_index return", "from sktime.forecasting.base._sktime import _SktimeForecaster class _StatsModelsAdapter(_OptionalForecastingHorizonMixin, _SktimeForecaster): \"\"\"Base class for", "return { name: self._fitted_forecaster.params.get(name) for name in self._get_fitted_param_names() } def", "= _coerce_int_to_range_index(y, X) self._set_y_X(y, X) self._set_fh(fh) self._fit_forecaster(y, X) self._is_fitted =", "= pd.RangeIndex(y.index[0], y.index[-1] + 1) try: np.testing.assert_array_equal(y.index, new_index) except AssertionError:", "Default is one-step ahead forecast, i.e. np.array([1]) X : pd.DataFrame,", "_SktimeForecaster): \"\"\"Base class for interfacing statsmodels forecasting algorithms\"\"\" _fitted_param_names =", "steps ahead to to predict. X : pd.DataFrame, optional (default=None)", "# statsmodels requires zero-based indexing starting at the # beginning", "return_pred_int : bool, optional (default=False) alpha : int or list,", "fh=None): \"\"\"Fit to training data. Parameters ---------- y : pd.Series", "import DEFAULT_ALPHA from sktime.forecasting.base._sktime import _OptionalForecastingHorizonMixin from sktime.forecasting.base._sktime import _SktimeForecaster", "integers start, end = fh.to_absolute_int(self._y.index[0], self.cutoff)[[0, -1]] y_pred = self._fitted_forecaster.predict(start,", "= new_index if X is not None: X.index = new_index", "time series to which to fit the forecaster. fh :", "self : returns an instance of self. \"\"\" # statsmodels", "ignored. 
return_pred_int : bool, optional (default=False) alpha : int or", "pd.Series) and type(y.index) == pd.Int64Index: y, X = _coerce_int_to_range_index(y, X)", "`y_train` with a \" \"pd.RangeIndex.\" ) y.index = new_index if", "optional (default=None) Exogenous variables are ignored. return_pred_int : bool, optional", ": bool, optional (default=False) alpha : int or list, optional", "statsmodels forecasting algorithms\"\"\" _fitted_param_names = () def __init__(self): self._forecaster =", "list, optional (default=0.95) Returns ------- y_pred : pd.Series Returns series", "at the # beginning of the training series when passing", "ahead to to predict. Default is one-step ahead forecast, i.e.", "Returns ------- y_pred : pd.Series Returns series of predicted values.", "self._fitted_forecaster = None super(_StatsModelsAdapter, self).__init__() def fit(self, y, X=None, fh=None):", "Exogenous variables are ignored Returns ------- self : returns an", "indexing starting at the # beginning of the training series", "pd.RangeIndex if isinstance(y, pd.Series) and type(y.index) == pd.Int64Index: y, X", "we coerce them here to pd.RangeIndex if isinstance(y, pd.Series) and", "sktime.forecasting.base._sktime import _SktimeForecaster class _StatsModelsAdapter(_OptionalForecastingHorizonMixin, _SktimeForecaster): \"\"\"Base class for interfacing", "self.check_is_fitted() return { name: self._fitted_forecaster.params.get(name) for name in self._get_fitted_param_names() }", "here to pd.RangeIndex if isinstance(y, pd.Series) and type(y.index) == pd.Int64Index:", "in forecasting horizon return y_pred.loc[fh.to_absolute(self.cutoff).to_pandas()] def get_fitted_params(self): \"\"\"Get fitted parameters", "not support the pd.Int64Index as required, # so we coerce", "self._fitted_forecaster.predict(start, end) # statsmodels forecasts all periods from start to", "def get_fitted_params(self): \"\"\"Get fitted parameters Returns ------- fitted_params : dict", "return self._fitted_param_names def _coerce_int_to_range_index(y, X=None): new_index = pd.RangeIndex(y.index[0], y.index[-1] +", "with the steps ahead to to predict. Default is one-step", "def __init__(self): self._forecaster = None self._fitted_forecaster = None super(_StatsModelsAdapter, self).__init__()", "raise ValueError( \"Coercion of pd.Int64Index to pd.RangeIndex \" \"failed. Please", "sktime.forecasting.base._sktime import _OptionalForecastingHorizonMixin from sktime.forecasting.base._sktime import _SktimeForecaster class _StatsModelsAdapter(_OptionalForecastingHorizonMixin, _SktimeForecaster):", "them here to pd.RangeIndex if isinstance(y, pd.Series) and type(y.index) ==", "X is not None: X.index = new_index return y, X", "y.index[-1] + 1) try: np.testing.assert_array_equal(y.index, new_index) except AssertionError: raise ValueError(", "int, list or np.array, optional (default=None) The forecasters horizon with", "instance of self. \"\"\" # statsmodels does not support the", "optional (default=None) Exogenous variables are ignored Returns ------- self :", "values. 
\"\"\" if return_pred_int: raise NotImplementedError() # statsmodels requires zero-based", "raise NotImplementedError(\"abstract method\") def _predict(self, fh, X=None, return_pred_int=False, alpha=DEFAULT_ALPHA): \"\"\"", "# horizon, but only return given time points in forecasting", "horizon return y_pred.loc[fh.to_absolute(self.cutoff).to_pandas()] def get_fitted_params(self): \"\"\"Get fitted parameters Returns -------", "() def __init__(self): self._forecaster = None self._fitted_forecaster = None super(_StatsModelsAdapter,", "fh : int, list or np.array, optional (default=None) The forecasters", "= None super(_StatsModelsAdapter, self).__init__() def fit(self, y, X=None, fh=None): \"\"\"Fit", "pd.Int64Index as required, # so we coerce them here to", "np.testing.assert_array_equal(y.index, new_index) except AssertionError: raise ValueError( \"Coercion of pd.Int64Index to", "the training series when passing integers start, end = fh.to_absolute_int(self._y.index[0],", "with the steps ahead to to predict. X : pd.DataFrame,", "predict. X : pd.DataFrame, optional (default=None) Exogenous variables are ignored", "pd.RangeIndex(y.index[0], y.index[-1] + 1) try: np.testing.assert_array_equal(y.index, new_index) except AssertionError: raise", "horizon with the steps ahead to to predict. Default is", "pandas as pd from sktime.forecasting.base._base import DEFAULT_ALPHA from sktime.forecasting.base._sktime import", "} def _get_fitted_param_names(self): \"\"\"Get names of fitted parameters\"\"\" return self._fitted_param_names", "fh : ForecastingHorizon The forecasters horizon with the steps ahead", "Make forecasts. Parameters ---------- fh : ForecastingHorizon The forecasters horizon", "the pd.Int64Index as required, # so we coerce them here", "super(_StatsModelsAdapter, self).__init__() def fit(self, y, X=None, fh=None): \"\"\"Fit to training", "self._fit_forecaster(y, X) self._is_fitted = True return self def _fit_forecaster(self, y_train,", "name in self._get_fitted_param_names() } def _get_fitted_param_names(self): \"\"\"Get names of fitted", "X = _coerce_int_to_range_index(y, X) self._set_y_X(y, X) self._set_fh(fh) self._fit_forecaster(y, X) self._is_fitted", "# beginning of the training series when passing integers start,", "import numpy as np import pandas as pd from sktime.forecasting.base._base", "dict \"\"\" self.check_is_fitted() return { name: self._fitted_forecaster.params.get(name) for name in", "from start to end of forecasting # horizon, but only", "method\") def _predict(self, fh, X=None, return_pred_int=False, alpha=DEFAULT_ALPHA): \"\"\" Make forecasts.", "(default=None) Exogenous variables are ignored. return_pred_int : bool, optional (default=False)", "self def _fit_forecaster(self, y_train, X_train=None): \"\"\"Internal fit\"\"\" raise NotImplementedError(\"abstract method\")", "class _StatsModelsAdapter(_OptionalForecastingHorizonMixin, _SktimeForecaster): \"\"\"Base class for interfacing statsmodels forecasting algorithms\"\"\"", "series of predicted values. 
\"\"\" if return_pred_int: raise NotImplementedError() #", "type(y.index) == pd.Int64Index: y, X = _coerce_int_to_range_index(y, X) self._set_y_X(y, X)", "end = fh.to_absolute_int(self._y.index[0], self.cutoff)[[0, -1]] y_pred = self._fitted_forecaster.predict(start, end) #", "Parameters ---------- y : pd.Series Target time series to which", "{ name: self._fitted_forecaster.params.get(name) for name in self._get_fitted_param_names() } def _get_fitted_param_names(self):", "zero-based indexing starting at the # beginning of the training", "alpha=DEFAULT_ALPHA): \"\"\" Make forecasts. Parameters ---------- fh : ForecastingHorizon The", "self. \"\"\" # statsmodels does not support the pd.Int64Index as", "variables are ignored. return_pred_int : bool, optional (default=False) alpha :", "_StatsModelsAdapter(_OptionalForecastingHorizonMixin, _SktimeForecaster): \"\"\"Base class for interfacing statsmodels forecasting algorithms\"\"\" _fitted_param_names", "to to predict. Default is one-step ahead forecast, i.e. np.array([1])", "import _SktimeForecaster class _StatsModelsAdapter(_OptionalForecastingHorizonMixin, _SktimeForecaster): \"\"\"Base class for interfacing statsmodels", "self._is_fitted = True return self def _fit_forecaster(self, y_train, X_train=None): \"\"\"Internal", ": pd.DataFrame, optional (default=None) Exogenous variables are ignored. return_pred_int :", "to which to fit the forecaster. fh : int, list", "pd.RangeIndex \" \"failed. Please provide `y_train` with a \" \"pd.RangeIndex.\"", "new_index = pd.RangeIndex(y.index[0], y.index[-1] + 1) try: np.testing.assert_array_equal(y.index, new_index) except", "\"\"\" # statsmodels does not support the pd.Int64Index as required,", "pd.Int64Index to pd.RangeIndex \" \"failed. Please provide `y_train` with a", ": ForecastingHorizon The forecasters horizon with the steps ahead to", "\"pd.RangeIndex.\" ) y.index = new_index if X is not None:", "are ignored Returns ------- self : returns an instance of", "ForecastingHorizon The forecasters horizon with the steps ahead to to", "ahead forecast, i.e. np.array([1]) X : pd.DataFrame, optional (default=None) Exogenous", "to predict. Default is one-step ahead forecast, i.e. np.array([1]) X", "self._set_fh(fh) self._fit_forecaster(y, X) self._is_fitted = True return self def _fit_forecaster(self,", "fitted_params : dict \"\"\" self.check_is_fitted() return { name: self._fitted_forecaster.params.get(name) for", "passing integers start, end = fh.to_absolute_int(self._y.index[0], self.cutoff)[[0, -1]] y_pred =", "y_train, X_train=None): \"\"\"Internal fit\"\"\" raise NotImplementedError(\"abstract method\") def _predict(self, fh,", "predicted values. \"\"\" if return_pred_int: raise NotImplementedError() # statsmodels requires", "# statsmodels does not support the pd.Int64Index as required, #", "fit(self, y, X=None, fh=None): \"\"\"Fit to training data. Parameters ----------", "X) self._set_fh(fh) self._fit_forecaster(y, X) self._is_fitted = True return self def", "np.array([1]) X : pd.DataFrame, optional (default=None) Exogenous variables are ignored.", "self._fitted_param_names def _coerce_int_to_range_index(y, X=None): new_index = pd.RangeIndex(y.index[0], y.index[-1] + 1)", "an instance of self. \"\"\" # statsmodels does not support", "end of forecasting # horizon, but only return given time", ": returns an instance of self. \"\"\" # statsmodels does", "(default=0.95) Returns ------- y_pred : pd.Series Returns series of predicted", "to fit the forecaster. 
fh : int, list or np.array,", "X) self._set_y_X(y, X) self._set_fh(fh) self._fit_forecaster(y, X) self._is_fitted = True return", "forecast, i.e. np.array([1]) X : pd.DataFrame, optional (default=None) Exogenous variables", "names of fitted parameters\"\"\" return self._fitted_param_names def _coerce_int_to_range_index(y, X=None): new_index", "of forecasting # horizon, but only return given time points", "_SktimeForecaster class _StatsModelsAdapter(_OptionalForecastingHorizonMixin, _SktimeForecaster): \"\"\"Base class for interfacing statsmodels forecasting", "NotImplementedError() # statsmodels requires zero-based indexing starting at the #", "X : pd.DataFrame, optional (default=None) Exogenous variables are ignored. return_pred_int", "except AssertionError: raise ValueError( \"Coercion of pd.Int64Index to pd.RangeIndex \"", "#!/usr/bin/env python3 -u # -*- coding: utf-8 -*- __author__ =", ": int, list or np.array, optional (default=None) The forecasters horizon", "bool, optional (default=False) alpha : int or list, optional (default=0.95)", "return y_pred.loc[fh.to_absolute(self.cutoff).to_pandas()] def get_fitted_params(self): \"\"\"Get fitted parameters Returns ------- fitted_params", "self).__init__() def fit(self, y, X=None, fh=None): \"\"\"Fit to training data.", "= () def __init__(self): self._forecaster = None self._fitted_forecaster = None", "new_index) except AssertionError: raise ValueError( \"Coercion of pd.Int64Index to pd.RangeIndex", "import _OptionalForecastingHorizonMixin from sktime.forecasting.base._sktime import _SktimeForecaster class _StatsModelsAdapter(_OptionalForecastingHorizonMixin, _SktimeForecaster): \"\"\"Base", "the steps ahead to to predict. Default is one-step ahead", "returns an instance of self. \"\"\" # statsmodels does not", "from sktime.forecasting.base._base import DEFAULT_ALPHA from sktime.forecasting.base._sktime import _OptionalForecastingHorizonMixin from sktime.forecasting.base._sktime", "X=None): new_index = pd.RangeIndex(y.index[0], y.index[-1] + 1) try: np.testing.assert_array_equal(y.index, new_index)", "forecasters horizon with the steps ahead to to predict. Default", "fitted parameters Returns ------- fitted_params : dict \"\"\" self.check_is_fitted() return", "X=None, return_pred_int=False, alpha=DEFAULT_ALPHA): \"\"\" Make forecasts. Parameters ---------- fh :", "+ 1) try: np.testing.assert_array_equal(y.index, new_index) except AssertionError: raise ValueError( \"Coercion", "_get_fitted_param_names(self): \"\"\"Get names of fitted parameters\"\"\" return self._fitted_param_names def _coerce_int_to_range_index(y,", "fit\"\"\" raise NotImplementedError(\"abstract method\") def _predict(self, fh, X=None, return_pred_int=False, alpha=DEFAULT_ALPHA):", "-*- coding: utf-8 -*- __author__ = [\"<NAME>\"] __all__ = [\"_StatsModelsAdapter\"]", "of pd.Int64Index to pd.RangeIndex \" \"failed. Please provide `y_train` with", ") y.index = new_index if X is not None: X.index", "a \" \"pd.RangeIndex.\" ) y.index = new_index if X is", "one-step ahead forecast, i.e. np.array([1]) X : pd.DataFrame, optional (default=None)", "pd from sktime.forecasting.base._base import DEFAULT_ALPHA from sktime.forecasting.base._sktime import _OptionalForecastingHorizonMixin from", "\"Coercion of pd.Int64Index to pd.RangeIndex \" \"failed. Please provide `y_train`", "def fit(self, y, X=None, fh=None): \"\"\"Fit to training data. 
Parameters", "forecasting algorithms\"\"\" _fitted_param_names = () def __init__(self): self._forecaster = None", "None self._fitted_forecaster = None super(_StatsModelsAdapter, self).__init__() def fit(self, y, X=None,", "_coerce_int_to_range_index(y, X) self._set_y_X(y, X) self._set_fh(fh) self._fit_forecaster(y, X) self._is_fitted = True", "forecasting horizon return y_pred.loc[fh.to_absolute(self.cutoff).to_pandas()] def get_fitted_params(self): \"\"\"Get fitted parameters Returns", "import pandas as pd from sktime.forecasting.base._base import DEFAULT_ALPHA from sktime.forecasting.base._sktime", "-u # -*- coding: utf-8 -*- __author__ = [\"<NAME>\"] __all__", "self._set_y_X(y, X) self._set_fh(fh) self._fit_forecaster(y, X) self._is_fitted = True return self", "NotImplementedError(\"abstract method\") def _predict(self, fh, X=None, return_pred_int=False, alpha=DEFAULT_ALPHA): \"\"\" Make", ": dict \"\"\" self.check_is_fitted() return { name: self._fitted_forecaster.params.get(name) for name", ": pd.Series Target time series to which to fit the", "y_pred.loc[fh.to_absolute(self.cutoff).to_pandas()] def get_fitted_params(self): \"\"\"Get fitted parameters Returns ------- fitted_params :", "def _predict(self, fh, X=None, return_pred_int=False, alpha=DEFAULT_ALPHA): \"\"\" Make forecasts. Parameters", "---------- fh : ForecastingHorizon The forecasters horizon with the steps", "of self. \"\"\" # statsmodels does not support the pd.Int64Index", "= None self._fitted_forecaster = None super(_StatsModelsAdapter, self).__init__() def fit(self, y,", "statsmodels does not support the pd.Int64Index as required, # so", "[\"_StatsModelsAdapter\"] import numpy as np import pandas as pd from", "True return self def _fit_forecaster(self, y_train, X_train=None): \"\"\"Internal fit\"\"\" raise", "isinstance(y, pd.Series) and type(y.index) == pd.Int64Index: y, X = _coerce_int_to_range_index(y,", "to pd.RangeIndex \" \"failed. Please provide `y_train` with a \"", "training data. Parameters ---------- y : pd.Series Target time series", "= self._fitted_forecaster.predict(start, end) # statsmodels forecasts all periods from start", "fh.to_absolute_int(self._y.index[0], self.cutoff)[[0, -1]] y_pred = self._fitted_forecaster.predict(start, end) # statsmodels forecasts", "for interfacing statsmodels forecasting algorithms\"\"\" _fitted_param_names = () def __init__(self):", "\"\"\"Internal fit\"\"\" raise NotImplementedError(\"abstract method\") def _predict(self, fh, X=None, return_pred_int=False,", "Please provide `y_train` with a \" \"pd.RangeIndex.\" ) y.index =", "or np.array, optional (default=None) The forecasters horizon with the steps", "does not support the pd.Int64Index as required, # so we", "algorithms\"\"\" _fitted_param_names = () def __init__(self): self._forecaster = None self._fitted_forecaster", "is one-step ahead forecast, i.e. np.array([1]) X : pd.DataFrame, optional", "required, # so we coerce them here to pd.RangeIndex if", "return_pred_int=False, alpha=DEFAULT_ALPHA): \"\"\" Make forecasts. 
Parameters ---------- fh : ForecastingHorizon", "# -*- coding: utf-8 -*- __author__ = [\"<NAME>\"] __all__ =", "the # beginning of the training series when passing integers", "y : pd.Series Target time series to which to fit", "self._fitted_forecaster.params.get(name) for name in self._get_fitted_param_names() } def _get_fitted_param_names(self): \"\"\"Get names", "= [\"_StatsModelsAdapter\"] import numpy as np import pandas as pd", "ignored Returns ------- self : returns an instance of self.", "None super(_StatsModelsAdapter, self).__init__() def fit(self, y, X=None, fh=None): \"\"\"Fit to", "all periods from start to end of forecasting # horizon,", "_fitted_param_names = () def __init__(self): self._forecaster = None self._fitted_forecaster =", "starting at the # beginning of the training series when", "------- fitted_params : dict \"\"\" self.check_is_fitted() return { name: self._fitted_forecaster.params.get(name)", "optional (default=0.95) Returns ------- y_pred : pd.Series Returns series of", "__init__(self): self._forecaster = None self._fitted_forecaster = None super(_StatsModelsAdapter, self).__init__() def", "_predict(self, fh, X=None, return_pred_int=False, alpha=DEFAULT_ALPHA): \"\"\" Make forecasts. Parameters ----------", "pd.Int64Index: y, X = _coerce_int_to_range_index(y, X) self._set_y_X(y, X) self._set_fh(fh) self._fit_forecaster(y,", "AssertionError: raise ValueError( \"Coercion of pd.Int64Index to pd.RangeIndex \" \"failed.", "int or list, optional (default=0.95) Returns ------- y_pred : pd.Series", "as np import pandas as pd from sktime.forecasting.base._base import DEFAULT_ALPHA", "to pd.RangeIndex if isinstance(y, pd.Series) and type(y.index) == pd.Int64Index: y,", "# statsmodels forecasts all periods from start to end of", "time points in forecasting horizon return y_pred.loc[fh.to_absolute(self.cutoff).to_pandas()] def get_fitted_params(self): \"\"\"Get", "pd.Series Target time series to which to fit the forecaster.", "as pd from sktime.forecasting.base._base import DEFAULT_ALPHA from sktime.forecasting.base._sktime import _OptionalForecastingHorizonMixin", "_fit_forecaster(self, y_train, X_train=None): \"\"\"Internal fit\"\"\" raise NotImplementedError(\"abstract method\") def _predict(self,", "-1]] y_pred = self._fitted_forecaster.predict(start, end) # statsmodels forecasts all periods", "with a \" \"pd.RangeIndex.\" ) y.index = new_index if X", "forecasters horizon with the steps ahead to to predict. X", "the forecaster. fh : int, list or np.array, optional (default=None)", "points in forecasting horizon return y_pred.loc[fh.to_absolute(self.cutoff).to_pandas()] def get_fitted_params(self): \"\"\"Get fitted", "from sktime.forecasting.base._sktime import _OptionalForecastingHorizonMixin from sktime.forecasting.base._sktime import _SktimeForecaster class _StatsModelsAdapter(_OptionalForecastingHorizonMixin,", "\"\"\"Base class for interfacing statsmodels forecasting algorithms\"\"\" _fitted_param_names = ()", "Exogenous variables are ignored. 
return_pred_int : bool, optional (default=False) alpha", "in self._get_fitted_param_names() } def _get_fitted_param_names(self): \"\"\"Get names of fitted parameters\"\"\"", "try: np.testing.assert_array_equal(y.index, new_index) except AssertionError: raise ValueError( \"Coercion of pd.Int64Index", "y_pred = self._fitted_forecaster.predict(start, end) # statsmodels forecasts all periods from", "DEFAULT_ALPHA from sktime.forecasting.base._sktime import _OptionalForecastingHorizonMixin from sktime.forecasting.base._sktime import _SktimeForecaster class", "pd.DataFrame, optional (default=None) Exogenous variables are ignored. return_pred_int : bool,", "[\"<NAME>\"] __all__ = [\"_StatsModelsAdapter\"] import numpy as np import pandas", "(default=False) alpha : int or list, optional (default=0.95) Returns -------", "fh, X=None, return_pred_int=False, alpha=DEFAULT_ALPHA): \"\"\" Make forecasts. Parameters ---------- fh", "statsmodels forecasts all periods from start to end of forecasting", "X) self._is_fitted = True return self def _fit_forecaster(self, y_train, X_train=None):", "ValueError( \"Coercion of pd.Int64Index to pd.RangeIndex \" \"failed. Please provide", "of predicted values. \"\"\" if return_pred_int: raise NotImplementedError() # statsmodels", "pd.DataFrame, optional (default=None) Exogenous variables are ignored Returns ------- self", "Target time series to which to fit the forecaster. fh", "if isinstance(y, pd.Series) and type(y.index) == pd.Int64Index: y, X =", "\" \"failed. Please provide `y_train` with a \" \"pd.RangeIndex.\" )", "data. Parameters ---------- y : pd.Series Target time series to", "y_pred : pd.Series Returns series of predicted values. \"\"\" if", "statsmodels requires zero-based indexing starting at the # beginning of", "(default=None) Exogenous variables are ignored Returns ------- self : returns", "_OptionalForecastingHorizonMixin from sktime.forecasting.base._sktime import _SktimeForecaster class _StatsModelsAdapter(_OptionalForecastingHorizonMixin, _SktimeForecaster): \"\"\"Base class", "steps ahead to to predict. Default is one-step ahead forecast,", "support the pd.Int64Index as required, # so we coerce them", "y, X = _coerce_int_to_range_index(y, X) self._set_y_X(y, X) self._set_fh(fh) self._fit_forecaster(y, X)", "def _coerce_int_to_range_index(y, X=None): new_index = pd.RangeIndex(y.index[0], y.index[-1] + 1) try:", "raise NotImplementedError() # statsmodels requires zero-based indexing starting at the", "\"\"\"Get names of fitted parameters\"\"\" return self._fitted_param_names def _coerce_int_to_range_index(y, X=None):", "\"failed. Please provide `y_train` with a \" \"pd.RangeIndex.\" ) y.index", "return self def _fit_forecaster(self, y_train, X_train=None): \"\"\"Internal fit\"\"\" raise NotImplementedError(\"abstract", "series to which to fit the forecaster. fh : int,", "\"\"\" if return_pred_int: raise NotImplementedError() # statsmodels requires zero-based indexing", "optional (default=False) alpha : int or list, optional (default=0.95) Returns", "series when passing integers start, end = fh.to_absolute_int(self._y.index[0], self.cutoff)[[0, -1]]", "to to predict. X : pd.DataFrame, optional (default=None) Exogenous variables", "Returns series of predicted values. 
\"\"\" if return_pred_int: raise NotImplementedError()", "self._forecaster = None self._fitted_forecaster = None super(_StatsModelsAdapter, self).__init__() def fit(self,", "def _get_fitted_param_names(self): \"\"\"Get names of fitted parameters\"\"\" return self._fitted_param_names def", "interfacing statsmodels forecasting algorithms\"\"\" _fitted_param_names = () def __init__(self): self._forecaster", "np import pandas as pd from sktime.forecasting.base._base import DEFAULT_ALPHA from", "periods from start to end of forecasting # horizon, but", "provide `y_train` with a \" \"pd.RangeIndex.\" ) y.index = new_index", "Returns ------- self : returns an instance of self. \"\"\"", "horizon with the steps ahead to to predict. X :", "get_fitted_params(self): \"\"\"Get fitted parameters Returns ------- fitted_params : dict \"\"\"", "self._get_fitted_param_names() } def _get_fitted_param_names(self): \"\"\"Get names of fitted parameters\"\"\" return", "name: self._fitted_forecaster.params.get(name) for name in self._get_fitted_param_names() } def _get_fitted_param_names(self): \"\"\"Get", ": pd.DataFrame, optional (default=None) Exogenous variables are ignored Returns -------", "def _fit_forecaster(self, y_train, X_train=None): \"\"\"Internal fit\"\"\" raise NotImplementedError(\"abstract method\") def", "return given time points in forecasting horizon return y_pred.loc[fh.to_absolute(self.cutoff).to_pandas()] def", "\"\"\" Make forecasts. Parameters ---------- fh : ForecastingHorizon The forecasters", "sktime.forecasting.base._base import DEFAULT_ALPHA from sktime.forecasting.base._sktime import _OptionalForecastingHorizonMixin from sktime.forecasting.base._sktime import", "or list, optional (default=0.95) Returns ------- y_pred : pd.Series Returns", "forecasts all periods from start to end of forecasting #", "parameters\"\"\" return self._fitted_param_names def _coerce_int_to_range_index(y, X=None): new_index = pd.RangeIndex(y.index[0], y.index[-1]", "python3 -u # -*- coding: utf-8 -*- __author__ = [\"<NAME>\"]", "------- self : returns an instance of self. \"\"\" #", "= True return self def _fit_forecaster(self, y_train, X_train=None): \"\"\"Internal fit\"\"\"", "X=None, fh=None): \"\"\"Fit to training data. Parameters ---------- y :", "= [\"<NAME>\"] __all__ = [\"_StatsModelsAdapter\"] import numpy as np import", "to predict. X : pd.DataFrame, optional (default=None) Exogenous variables are", "\" \"pd.RangeIndex.\" ) y.index = new_index if X is not", "requires zero-based indexing starting at the # beginning of the", "forecasting # horizon, but only return given time points in", "------- y_pred : pd.Series Returns series of predicted values. \"\"\"", "class for interfacing statsmodels forecasting algorithms\"\"\" _fitted_param_names = () def", "are ignored. return_pred_int : bool, optional (default=False) alpha : int", "\"\"\" self.check_is_fitted() return { name: self._fitted_forecaster.params.get(name) for name in self._get_fitted_param_names()", "of fitted parameters\"\"\" return self._fitted_param_names def _coerce_int_to_range_index(y, X=None): new_index =", "__author__ = [\"<NAME>\"] __all__ = [\"_StatsModelsAdapter\"] import numpy as np" ]
[ "= 0. self.ixx = 0. self.ixy = 0. self.ixz =", "inertia float64 iyz # moment of inertia float64 izz #", "composed of position and orientation. Point position Quaternion orientation ================================================================================", "as e: raise genpy.DeserializationError(e) # most likely buffer underfill def", "encoding: utf-8 \"\"\"autogenerated by genpy from gazebo_msgs/GetLinkPropertiesResponse.msg. Do not edit.\"\"\"", "of serialized message, ``str`` \"\"\" codecs.lookup_error(\"rosmsg\").msg_type = self._type try: end", "will be assigned a default value. The recommend use is", "self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))", "edit.\"\"\" import codecs import sys python3 = True if sys.hexversion", "message into buffer :param buff: buffer, ``StringIO`` \"\"\" try: _x", "center of mass location in link frame # and orientation", "'%s'\" % (type(te), str(te), str(locals().get('_x', self))))) def deserialize_numpy(self, str, numpy):", "python3 = True if sys.hexversion > 0x03000000 else False import", "field values, in .msg order :param kwds: use keyword arguments", "try: if self.com is None: self.com = geometry_msgs.msg.Pose() end =", "= _x.encode('utf-8') length = len(_x) buff.write(struct.Struct('<I%ss'%length).pack(length, _x)) except struct.error as", ":param numpy: numpy python module \"\"\" try: _x = self", "0. if self.iyz is None: self.iyz = 0. if self.izz", "self.link_name = str[start:end].decode('utf-8', 'rosmsg') else: self.link_name = str[start:end] return self", "114 (_x.com.position.x, _x.com.position.y, _x.com.position.z, _x.com.orientation.x, _x.com.orientation.y, _x.com.orientation.z, _x.com.orientation.w, _x.gravity_mode, _x.mass,", "= end end += length if python3: self.status_message = str[start:end].decode('utf-8',", "import sys python3 = True if sys.hexversion > 0x03000000 else", "str, numpy): \"\"\" unpack serialized message in str into this", "end = 0 start = end end += 4 (length,)", "0 _x = self start = end end += 114", "self.mass = 0. self.ixx = 0. self.ixy = 0. self.ixz", "False self.mass = 0. self.ixx = 0. self.ixy = 0.", "geometry_msgs/Pose # A representation of pose in free space, composed", "be assigned a default value. The recommend use is keyword", "field names to set specific fields. \"\"\" if args or", "args or kwds: super(GetLinkPropertiesResponse, self).__init__(*args, **kwds) # message fields cannot", "_type = \"gazebo_msgs/GetLinkPropertiesResponse\" _has_header = False # flag to mark", "successful string status_message # comments if available ================================================================================ MSG: geometry_msgs/Pose", "= 0. if self.ixy is None: self.ixy = 0. if", "buffer :param buff: buffer, ``StringIO`` \"\"\" try: _x = self.link_name", "= len(_x) if python3 or type(_x) == unicode: _x =", "= self start = end end += 114 (_x.com.position.x, _x.com.position.y,", "self.com = geometry_msgs.msg.Pose() if self.gravity_mode is None: self.gravity_mode = False", "if self.mass is None: self.mass = 0. if self.ixx is", "self.success = False self.status_message = '' def _get_types(self): \"\"\" internal", "geometry_msgs.msg class GetLinkPropertiesResponse(genpy.Message): _md5sum = \"a8619f92d17cfcc3958c0fd13299443d\" _type = \"gazebo_msgs/GetLinkPropertiesResponse\" _has_header", "future message changes. 
You cannot mix in-order arguments and keyword", "# center of mass location in link frame # and", "most likely buffer underfill def serialize_numpy(self, buff, numpy): \"\"\" serialize", "\"7d82d60381f1b66a30f2157f60884345\" _type = \"gazebo_msgs/GetLinkPropertiesRequest\" _has_header = False # flag to", "message with numpy array types into buffer :param buff: buffer,", "\"\"\"autogenerated by genpy from gazebo_msgs/GetLinkPropertiesResponse.msg. Do not edit.\"\"\" import codecs", "self.ixy = 0. self.ixz = 0. self.iyy = 0. self.iyz", "= '' else: self.com = geometry_msgs.msg.Pose() self.gravity_mode = False self.mass", "when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self))))) def deserialize(self,", "and orientation. Point position Quaternion orientation ================================================================================ MSG: geometry_msgs/Point #", "fields. \"\"\" if args or kwds: super(GetLinkPropertiesResponse, self).__init__(*args, **kwds) #", "> 0x03000000 else False import genpy import struct import geometry_msgs.msg", "= bool(self.gravity_mode) self.success = bool(self.success) start = end end +=", "serialized message, ``str`` \"\"\" codecs.lookup_error(\"rosmsg\").msg_type = self._type try: if self.com", "is None: self.iyy = 0. if self.iyz is None: self.iyz", "A representation of pose in free space, composed of position", "are prefixed by model name, e.g. pr2::base_link \"\"\" __slots__ =", "of inertia bool success # return true if get info", "genpy from gazebo_msgs/GetLinkPropertiesRequest.msg. Do not edit.\"\"\" import codecs import sys", "None, assign default values for those that are if self.com", "(type(te), str(te), str(locals().get('_x', self))))) def deserialize_numpy(self, str, numpy): \"\"\" unpack", "is None: self.ixy = 0. 
if self.ixz is None: self.ixz", "python module \"\"\" try: _x = self.link_name length = len(_x)", "'%s'\" % (type(te), str(te), str(locals().get('_x', self))))) def deserialize(self, str): \"\"\"", "if self.com is None: self.com = geometry_msgs.msg.Pose() if self.gravity_mode is", "# moment of inertia float64 iyy # moment of inertia", "genpy.struct_I def _get_struct_I(): global _struct_I return _struct_I _struct_7dB7dB = None", "str(te), str(locals().get('_x', self))))) def deserialize_numpy(self, str, numpy): \"\"\" unpack serialized", "w \"\"\" __slots__ = ['com','gravity_mode','mass','ixx','ixy','ixz','iyy','iyz','izz','success','status_message'] _slot_types = ['geometry_msgs/Pose','bool','float64','float64','float64','float64','float64','float64','float64','bool','string'] def __init__(self,", "mass location in link frame # and orientation of the", "float64 z float64 w \"\"\" __slots__ = ['com','gravity_mode','mass','ixx','ixy','ixz','iyy','iyz','izz','success','status_message'] _slot_types =", "geometry_msgs.msg.Pose() if self.gravity_mode is None: self.gravity_mode = False if self.mass", "values for those that are if self.link_name is None: self.link_name", "``StringIO`` \"\"\" try: _x = self.link_name length = len(_x) if", "genpy import struct import geometry_msgs.msg class GetLinkPropertiesResponse(genpy.Message): _md5sum = \"a8619f92d17cfcc3958c0fd13299443d\"", "GetLinkPropertiesRequest(genpy.Message): _md5sum = \"7d82d60381f1b66a30f2157f60884345\" _type = \"gazebo_msgs/GetLinkPropertiesRequest\" _has_header = False", "sys.hexversion > 0x03000000 else False import genpy import struct import", "GetLinkPropertiesResponse(genpy.Message): _md5sum = \"a8619f92d17cfcc3958c0fd13299443d\" _type = \"gazebo_msgs/GetLinkPropertiesResponse\" _has_header = False", "likely buffer underfill def serialize_numpy(self, buff, numpy): \"\"\" serialize message", "in-order arguments and keyword arguments. The available fields are: link_name", "= _struct_I.unpack(str[start:end]) start = end end += length if python3:", "self.success is None: self.success = False if self.status_message is None:", "pr2::base_link \"\"\" __slots__ = ['link_name'] _slot_types = ['string'] def __init__(self,", "sys python3 = True if sys.hexversion > 0x03000000 else False", "self.link_name = str[start:end] return self except struct.error as e: raise", ":param buff: buffer, ``StringIO`` \"\"\" try: _x = self buff.write(_get_struct_7dB7dB().pack(_x.com.position.x,", "str[start:end] return self except struct.error as e: raise genpy.DeserializationError(e) #", "numpy): \"\"\" serialize message with numpy array types into buffer", "__slots__ = ['com','gravity_mode','mass','ixx','ixy','ixz','iyy','iyz','izz','success','status_message'] _slot_types = ['geometry_msgs/Pose','bool','float64','float64','float64','float64','float64','float64','float64','bool','string'] def __init__(self, *args, **kwds):", "MSG: geometry_msgs/Quaternion # This represents an orientation in free space", "writing '%s'\" % (type(se), str(se), str(locals().get('_x', self))))) except TypeError as", "link frame bool gravity_mode # set gravity mode on/off float64", "when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self))))) except TypeError", "space in quaternion form. 
float64 x float64 y float64 z", "# comments if available ================================================================================ MSG: geometry_msgs/Pose # A representation", "= ['link_name'] _slot_types = ['string'] def __init__(self, *args, **kwds): \"\"\"", "end end += length if python3: self.link_name = str[start:end].decode('utf-8', 'rosmsg')", "_x.izz, _x.success)) _x = self.status_message length = len(_x) if python3", "link_name :param args: complete set of field values, in .msg", "= self._type try: if self.com is None: self.com = geometry_msgs.msg.Pose()", "\"\"\"string link_name # name of link # link names are", "= False if self.status_message is None: self.status_message = '' else:", "is successful string status_message # comments if available ================================================================================ MSG:", "of a point in free space float64 x float64 y", "self.status_message = str[start:end] return self except struct.error as e: raise", "import struct import geometry_msgs.msg class GetLinkPropertiesResponse(genpy.Message): _md5sum = \"a8619f92d17cfcc3958c0fd13299443d\" _type", "in free space float64 x float64 y float64 z ================================================================================", "underfill def serialize_numpy(self, buff, numpy): \"\"\" serialize message with numpy", "0. if self.ixx is None: self.ixx = 0. if self.ixy", "names are prefixed by model name, e.g. pr2::base_link \"\"\" __slots__", "__slots__ = ['link_name'] _slot_types = ['string'] def __init__(self, *args, **kwds):", "python module \"\"\" try: _x = self buff.write(_get_struct_7dB7dB().pack(_x.com.position.x, _x.com.position.y, _x.com.position.z,", "if python3: self.status_message = str[start:end].decode('utf-8', 'rosmsg') else: self.status_message = str[start:end]", "of inertia float64 iyz # moment of inertia float64 izz", "model name, e.g. pr2::base_link \"\"\" __slots__ = ['link_name'] _slot_types =", "numpy: numpy python module \"\"\" codecs.lookup_error(\"rosmsg\").msg_type = self._type try: if", "= self._type try: end = 0 start = end end", "end = 0 _x = self start = end end", "cannot mix in-order arguments and keyword arguments. The available fields", "if self.status_message is None: self.status_message = '' else: self.com =", "False import genpy import struct class GetLinkPropertiesRequest(genpy.Message): _md5sum = \"7d82d60381f1b66a30f2157f60884345\"", "link_name # name of link # link names are prefixed", "fields. \"\"\" if args or kwds: super(GetLinkPropertiesRequest, self).__init__(*args, **kwds) #", "genpy.DeserializationError(e) # most likely buffer underfill def serialize_numpy(self, buff, numpy):", "\"\"\"autogenerated by genpy from gazebo_msgs/GetLinkPropertiesRequest.msg. Do not edit.\"\"\" import codecs", "self.iyz = 0. self.izz = 0. self.success = False self.status_message", "or kwds: super(GetLinkPropertiesRequest, self).__init__(*args, **kwds) # message fields cannot be", "inertia float64 izz # moment of inertia bool success #", "that are if self.link_name is None: self.link_name = '' else:", "0. if self.ixy is None: self.ixy = 0. 
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from gazebo_msgs/GetLinkPropertiesRequest.msg. Do not edit."""
import codecs
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct


class GetLinkPropertiesRequest(genpy.Message):
  _md5sum = "7d82d60381f1b66a30f2157f60884345"
  _type = "gazebo_msgs/GetLinkPropertiesRequest"
  _has_header = False  # flag to mark the presence of a Header object
  _full_text = """string link_name            # name of link
# link names are prefixed by model name, e.g. pr2::base_link
"""
  __slots__ = ['link_name']
  _slot_types = ['string']

  def __init__(self, *args, **kwds):
    """
    Constructor. Any message fields that are implicitly/explicitly
    set to None will be assigned a default value. The recommended
    use is keyword arguments as this is more robust to future message
    changes. You cannot mix in-order arguments and keyword arguments.

    The available fields are:
       link_name

    :param args: complete set of field values, in .msg order
    :param kwds: use keyword arguments corresponding to message field names
    to set specific fields.
    """
    if args or kwds:
      super(GetLinkPropertiesRequest, self).__init__(*args, **kwds)
      # message fields cannot be None, assign default values for those that are
      if self.link_name is None:
        self.link_name = ''
    else:
      self.link_name = ''

  def _get_types(self):
    """
    internal API method
    """
    return self._slot_types

  def serialize(self, buff):
    """
    serialize message into buffer
    :param buff: buffer, ``StringIO``
    """
    try:
      _x = self.link_name
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))

  def deserialize(self, str):
    """
    unpack serialized message in str into this message instance
    :param str: byte array of serialized message, ``str``
    """
    codecs.lookup_error("rosmsg").msg_type = self._type
    try:
      end = 0
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.link_name = str[start:end].decode('utf-8', 'rosmsg')
      else:
        self.link_name = str[start:end]
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e)  # most likely buffer underfill

  def serialize_numpy(self, buff, numpy):
    """
    serialize message with numpy array types into buffer
    :param buff: buffer, ``StringIO``
    :param numpy: numpy python module
    """
    try:
      _x = self.link_name
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))

  def deserialize_numpy(self, str, numpy):
    """
    unpack serialized message in str into this message instance using numpy for array types
    :param str: byte array of serialized message, ``str``
    :param numpy: numpy python module
    """
    codecs.lookup_error("rosmsg").msg_type = self._type
    try:
      end = 0
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.link_name = str[start:end].decode('utf-8', 'rosmsg')
      else:
        self.link_name = str[start:end]
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e)  # most likely buffer underfill

_struct_I = genpy.struct_I
def _get_struct_I():
    global _struct_I
    return _struct_I
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from gazebo_msgs/GetLinkPropertiesResponse.msg. Do not edit."""
import codecs
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct

import geometry_msgs.msg

class GetLinkPropertiesResponse(genpy.Message):
  _md5sum = "a8619f92d17cfcc3958c0fd13299443d"
  _type = "gazebo_msgs/GetLinkPropertiesResponse"
  _has_header = False  # flag to mark the presence of a Header object
  _full_text = """geometry_msgs/Pose com    # center of mass location in link frame
                          # and orientation of the moment of inertias
                          # relative to the link frame
bool gravity_mode         # set gravity mode on/off
float64 mass              # linear mass of link
float64 ixx               # moment of inertia
float64 ixy               # moment of inertia
float64 ixz               # moment of inertia
float64 iyy               # moment of inertia
float64 iyz               # moment of inertia
float64 izz               # moment of inertia
bool success              # return true if get info is successful
string status_message     # comments if available

================================================================================
MSG: geometry_msgs/Pose
# A representation of pose in free space, composed of position and orientation.
Point position
Quaternion orientation

================================================================================
MSG: geometry_msgs/Point
# This contains the position of a point in free space
float64 x
float64 y
float64 z

================================================================================
MSG: geometry_msgs/Quaternion
# This represents an orientation in free space in quaternion form.

float64 x
float64 y
float64 z
float64 w
"""
  __slots__ = ['com','gravity_mode','mass','ixx','ixy','ixz','iyy','iyz','izz','success','status_message']
  _slot_types = ['geometry_msgs/Pose','bool','float64','float64','float64','float64','float64','float64','float64','bool','string']

  def __init__(self, *args, **kwds):
    """
    Constructor. Any message fields that are implicitly/explicitly
    set to None will be assigned a default value. The recommended
    use is keyword arguments as this is more robust to future message
    changes. You cannot mix in-order arguments and keyword arguments.

    The available fields are:
       com,gravity_mode,mass,ixx,ixy,ixz,iyy,iyz,izz,success,status_message

    :param args: complete set of field values, in .msg order
    :param kwds: use keyword arguments corresponding to message field names
    to set specific fields.
    """
    if args or kwds:
      super(GetLinkPropertiesResponse, self).__init__(*args, **kwds)
      # message fields cannot be None, assign default values for those that are
      if self.com is None:
        self.com = geometry_msgs.msg.Pose()
      if self.gravity_mode is None:
        self.gravity_mode = False
      if self.mass is None:
        self.mass = 0.
      if self.ixx is None:
        self.ixx = 0.
      if self.ixy is None:
        self.ixy = 0.
      if self.ixz is None:
        self.ixz = 0.
      if self.iyy is None:
        self.iyy = 0.
      if self.iyz is None:
        self.iyz = 0.
      if self.izz is None:
        self.izz = 0.
      if self.success is None:
        self.success = False
      if self.status_message is None:
        self.status_message = ''
    else:
      self.com = geometry_msgs.msg.Pose()
      self.gravity_mode = False
      self.mass = 0.
      self.ixx = 0.
      self.ixy = 0.
      self.ixz = 0.
      self.iyy = 0.
      self.iyz = 0.
      self.izz = 0.
      self.success = False
      self.status_message = ''

  def _get_types(self):
    """
    internal API method
    """
    return self._slot_types

  def serialize(self, buff):
    """
    serialize message into buffer
    :param buff: buffer, ``StringIO``
    """
    try:
      _x = self
      buff.write(_get_struct_7dB7dB().pack(_x.com.position.x, _x.com.position.y, _x.com.position.z, _x.com.orientation.x, _x.com.orientation.y, _x.com.orientation.z, _x.com.orientation.w, _x.gravity_mode, _x.mass, _x.ixx, _x.ixy, _x.ixz, _x.iyy, _x.iyz, _x.izz, _x.success))
      _x = self.status_message
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))

  def deserialize(self, str):
    """
    unpack serialized message in str into this message instance
    :param str: byte array of serialized message, ``str``
    """
    codecs.lookup_error("rosmsg").msg_type = self._type
    try:
      if self.com is None:
        self.com = geometry_msgs.msg.Pose()
      end = 0
      _x = self
      start = end
      end += 114
      (_x.com.position.x, _x.com.position.y, _x.com.position.z, _x.com.orientation.x, _x.com.orientation.y, _x.com.orientation.z, _x.com.orientation.w, _x.gravity_mode, _x.mass, _x.ixx, _x.ixy, _x.ixz, _x.iyy, _x.iyz, _x.izz, _x.success,) = _get_struct_7dB7dB().unpack(str[start:end])
      self.gravity_mode = bool(self.gravity_mode)
      self.success = bool(self.success)
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.status_message = str[start:end].decode('utf-8', 'rosmsg')
      else:
        self.status_message = str[start:end]
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e)  # most likely buffer underfill

  def serialize_numpy(self, buff, numpy):
    """
    serialize message with numpy array types into buffer
    :param buff: buffer, ``StringIO``
    :param numpy: numpy python module
    """
    try:
      _x = self
      buff.write(_get_struct_7dB7dB().pack(_x.com.position.x, _x.com.position.y, _x.com.position.z, _x.com.orientation.x, _x.com.orientation.y, _x.com.orientation.z, _x.com.orientation.w, _x.gravity_mode, _x.mass, _x.ixx, _x.ixy, _x.ixz, _x.iyy, _x.iyz, _x.izz, _x.success))
      _x = self.status_message
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))

  def deserialize_numpy(self, str, numpy):
    """
    unpack serialized message in str into this message instance using numpy for array types
    :param str: byte array of serialized message, ``str``
    :param numpy: numpy python module
    """
    codecs.lookup_error("rosmsg").msg_type = self._type
    try:
      if self.com is None:
        self.com = geometry_msgs.msg.Pose()
      end = 0
      _x = self
      start = end
      end += 114
      (_x.com.position.x, _x.com.position.y, _x.com.position.z, _x.com.orientation.x, _x.com.orientation.y, _x.com.orientation.z, _x.com.orientation.w, _x.gravity_mode, _x.mass, _x.ixx, _x.ixy, _x.ixz, _x.iyy, _x.iyz, _x.izz, _x.success,) = _get_struct_7dB7dB().unpack(str[start:end])
      self.gravity_mode = bool(self.gravity_mode)
      self.success = bool(self.success)
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.status_message = str[start:end].decode('utf-8', 'rosmsg')
      else:
        self.status_message = str[start:end]
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e)  # most likely buffer underfill

_struct_I = genpy.struct_I
def _get_struct_I():
    global _struct_I
    return _struct_I
_struct_7dB7dB = None
def _get_struct_7dB7dB():
    global _struct_7dB7dB
    if _struct_7dB7dB is None:
        _struct_7dB7dB = struct.Struct("<7dB7dB")
    return _struct_7dB7dB
class GetLinkProperties(object):
  _type          = 'gazebo_msgs/GetLinkProperties'
  _md5sum = '0e06a70386d0ee3fb880c02f23fcd821'
  _request_class  = GetLinkPropertiesRequest
  _response_class = GetLinkPropertiesResponse
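For orientation, here is a minimal client sketch showing how the request/response classes above are typically exercised from a rospy node. It assumes ROS 1 with a running gazebo_ros instance, which is what normally advertises the /gazebo/get_link_properties service; adjust the service path if your setup differs.

# Hypothetical client sketch: query link properties from a running Gazebo
# simulation via the GetLinkProperties service defined above.
import rospy
from gazebo_msgs.srv import GetLinkProperties

rospy.init_node("link_properties_client")
rospy.wait_for_service("/gazebo/get_link_properties")
get_link_properties = rospy.ServiceProxy(
    "/gazebo/get_link_properties", GetLinkProperties)

# Link names are prefixed by the model name, e.g. pr2::base_link.
resp = get_link_properties(link_name="pr2::base_link")
if resp.success:
    print("mass:", resp.mass, "gravity_mode:", resp.gravity_mode)
else:
    print("lookup failed:", resp.status_message)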
[ "name, \"language\": language, \"display_name\": kernel_specs.display_name, } raise ValueError(\"No kernel found", "reraise try: # I prefer not to take a dependency", "def kernelspec_from_language(language): \"\"\"Return the python kernel that matches the current", "kernel found that matches the current python executable {}\\n\".format( sys.executable", "in find_kernel_specs(): kernel_specs = get_kernel_spec(name) if same_language(kernel_specs.language, language): return {", "found that matches the current python executable {}\\n\".format( sys.executable )", "= reraise(err) def set_kernelspec_from_language(notebook): \"\"\"Set the kernel specification based on", "reraise(err) def set_kernelspec_from_language(notebook): \"\"\"Set the kernel specification based on the", "that matches the current env, or the first kernel that", "same_language from .reraise import reraise try: # I prefer not", "cmd = kernel_specs.argv[0] if ( kernel_specs.language == \"python\" and os.path.isfile(cmd)", "and os.path.isfile(cmd) and os.path.samefile(cmd, sys.executable) ): return { \"name\": name,", "given language\"\"\" if language == \"python\": # Return the kernel", "( kernel_specs.language == \"python\" and os.path.isfile(cmd) and os.path.samefile(cmd, sys.executable) ):", "find_kernel_specs = reraise(err) get_kernel_spec = reraise(err) def set_kernelspec_from_language(notebook): \"\"\"Set the", "'main_language' metadata\"\"\" language = notebook.metadata.get(\"jupytext\", {}).get(\"main_language\") if \"kernelspec\" not in", "that matches the current Python executable for name in find_kernel_specs():", "kernel_specs = get_kernel_spec(name) if same_language(kernel_specs.language, language): return { \"name\": name,", "\"language\": language, \"display_name\": kernel_specs.display_name, } raise ValueError(\"No kernel found for", "I prefer not to take a dependency on jupyter_client from", "the 'main_language' metadata\"\"\" language = notebook.metadata.get(\"jupytext\", {}).get(\"main_language\") if \"kernelspec\" not", "\"\"\"Find kernel specifications for a given language\"\"\" import os import", "notebook.metadata.get(\"jupytext\", {}).pop(\"main_language\") def kernelspec_from_language(language): \"\"\"Return the python kernel that matches", ") + \"Install one with 'python -m ipykernel install --name", "name in find_kernel_specs(): kernel_specs = get_kernel_spec(name) if same_language(kernel_specs.language, language): return", "\"No kernel found that matches the current python executable {}\\n\".format(", "except ValueError: return notebook.metadata[\"kernelspec\"] = kernelspec notebook.metadata.get(\"jupytext\", {}).pop(\"main_language\") def kernelspec_from_language(language):", "\"python\" and os.path.isfile(cmd) and os.path.samefile(cmd, sys.executable) ): return { \"name\":", "language: try: kernelspec = kernelspec_from_language(language) except ValueError: return notebook.metadata[\"kernelspec\"] =", "\"python\": # Return the kernel that matches the current Python", "and language: try: kernelspec = kernelspec_from_language(language) except ValueError: return notebook.metadata[\"kernelspec\"]", "get_kernel_spec(name) cmd = kernel_specs.argv[0] if ( kernel_specs.language == \"python\" and", "in notebook.metadata and language: try: kernelspec = kernelspec_from_language(language) except ValueError:", "or the first kernel that matches the given language\"\"\" if", "current Python executable for name in find_kernel_specs(): kernel_specs = get_kernel_spec(name)", "for name in find_kernel_specs(): 
kernel_specs = get_kernel_spec(name) cmd = kernel_specs.argv[0]", "= get_kernel_spec(name) if same_language(kernel_specs.language, language): return { \"name\": name, \"language\":", "Return the kernel that matches the current Python executable for", "except ImportError as err: find_kernel_specs = reraise(err) get_kernel_spec = reraise(err)", "{}).pop(\"main_language\") def kernelspec_from_language(language): \"\"\"Return the python kernel that matches the", "kernelspec_from_language(language) except ValueError: return notebook.metadata[\"kernelspec\"] = kernelspec notebook.metadata.get(\"jupytext\", {}).pop(\"main_language\") def", "kernel_specs.display_name, } raise ValueError(\"No kernel found for the language {}\".format(language))", "given language\"\"\" import os import sys from .languages import same_language", "ImportError as err: find_kernel_specs = reraise(err) get_kernel_spec = reraise(err) def", "language, \"display_name\": kernel_specs.display_name, } raise ValueError( \"No kernel found that", "the given language\"\"\" if language == \"python\": # Return the", "matches the current env, or the first kernel that matches", "language, \"display_name\": kernel_specs.display_name, } raise ValueError(\"No kernel found for the", "find_kernel_specs, get_kernel_spec except ImportError as err: find_kernel_specs = reraise(err) get_kernel_spec", "from .languages import same_language from .reraise import reraise try: #", "kernelspec_from_language(language): \"\"\"Return the python kernel that matches the current env,", "import sys from .languages import same_language from .reraise import reraise", "kernelspec notebook.metadata.get(\"jupytext\", {}).pop(\"main_language\") def kernelspec_from_language(language): \"\"\"Return the python kernel that", "err: find_kernel_specs = reraise(err) get_kernel_spec = reraise(err) def set_kernelspec_from_language(notebook): \"\"\"Set", "raise ValueError( \"No kernel found that matches the current python", "\"name\": name, \"language\": language, \"display_name\": kernel_specs.display_name, } raise ValueError(\"No kernel", "for a given language\"\"\" import os import sys from .languages", "notebook.metadata.get(\"jupytext\", {}).get(\"main_language\") if \"kernelspec\" not in notebook.metadata and language: try:", "to take a dependency on jupyter_client from jupyter_client.kernelspec import find_kernel_specs,", "= reraise(err) get_kernel_spec = reraise(err) def set_kernelspec_from_language(notebook): \"\"\"Set the kernel", "kernel that matches the current env, or the first kernel", "# I prefer not to take a dependency on jupyter_client", "\"name\": name, \"language\": language, \"display_name\": kernel_specs.display_name, } raise ValueError( \"No", "= kernelspec_from_language(language) except ValueError: return notebook.metadata[\"kernelspec\"] = kernelspec notebook.metadata.get(\"jupytext\", {}).pop(\"main_language\")", "= kernel_specs.argv[0] if ( kernel_specs.language == \"python\" and os.path.isfile(cmd) and", "ValueError( \"No kernel found that matches the current python executable", "kernel specification based on the 'main_language' metadata\"\"\" language = notebook.metadata.get(\"jupytext\",", "language\"\"\" if language == \"python\": # Return the kernel that", "{ \"name\": name, \"language\": language, \"display_name\": kernel_specs.display_name, } raise ValueError(", "-m ipykernel install --name kernel_name [--user]'\" ) for name in", "def set_kernelspec_from_language(notebook): \"\"\"Set the kernel specification based on the 'main_language'", 
"\"\"\"Return the python kernel that matches the current env, or", "'python -m ipykernel install --name kernel_name [--user]'\" ) for name", "current env, or the first kernel that matches the given", "notebook.metadata[\"kernelspec\"] = kernelspec notebook.metadata.get(\"jupytext\", {}).pop(\"main_language\") def kernelspec_from_language(language): \"\"\"Return the python", "import same_language from .reraise import reraise try: # I prefer", "specifications for a given language\"\"\" import os import sys from", "ValueError: return notebook.metadata[\"kernelspec\"] = kernelspec notebook.metadata.get(\"jupytext\", {}).pop(\"main_language\") def kernelspec_from_language(language): \"\"\"Return", "get_kernel_spec(name) if same_language(kernel_specs.language, language): return { \"name\": name, \"language\": language,", "os.path.isfile(cmd) and os.path.samefile(cmd, sys.executable) ): return { \"name\": name, \"language\":", "prefer not to take a dependency on jupyter_client from jupyter_client.kernelspec", "current python executable {}\\n\".format( sys.executable ) + \"Install one with", "try: # I prefer not to take a dependency on", "} raise ValueError( \"No kernel found that matches the current", "kernel that matches the given language\"\"\" if language == \"python\":", "python executable {}\\n\".format( sys.executable ) + \"Install one with 'python", "metadata\"\"\" language = notebook.metadata.get(\"jupytext\", {}).get(\"main_language\") if \"kernelspec\" not in notebook.metadata", "one with 'python -m ipykernel install --name kernel_name [--user]'\" )", "# Return the kernel that matches the current Python executable", "\"display_name\": kernel_specs.display_name, } raise ValueError( \"No kernel found that matches", "+ \"Install one with 'python -m ipykernel install --name kernel_name", ") for name in find_kernel_specs(): kernel_specs = get_kernel_spec(name) if same_language(kernel_specs.language,", "if language == \"python\": # Return the kernel that matches", "kernelspec = kernelspec_from_language(language) except ValueError: return notebook.metadata[\"kernelspec\"] = kernelspec notebook.metadata.get(\"jupytext\",", "same_language(kernel_specs.language, language): return { \"name\": name, \"language\": language, \"display_name\": kernel_specs.display_name,", "import reraise try: # I prefer not to take a", "kernel_specs.argv[0] if ( kernel_specs.language == \"python\" and os.path.isfile(cmd) and os.path.samefile(cmd,", "os.path.samefile(cmd, sys.executable) ): return { \"name\": name, \"language\": language, \"display_name\":", "based on the 'main_language' metadata\"\"\" language = notebook.metadata.get(\"jupytext\", {}).get(\"main_language\") if", "that matches the current python executable {}\\n\".format( sys.executable ) +", "specification based on the 'main_language' metadata\"\"\" language = notebook.metadata.get(\"jupytext\", {}).get(\"main_language\")", "that matches the given language\"\"\" if language == \"python\": #", "find_kernel_specs(): kernel_specs = get_kernel_spec(name) if same_language(kernel_specs.language, language): return { \"name\":", "kernel_specs.language == \"python\" and os.path.isfile(cmd) and os.path.samefile(cmd, sys.executable) ): return", "as err: find_kernel_specs = reraise(err) get_kernel_spec = reraise(err) def set_kernelspec_from_language(notebook):", "= kernelspec notebook.metadata.get(\"jupytext\", {}).pop(\"main_language\") def kernelspec_from_language(language): \"\"\"Return the python kernel", "sys.executable) ): return { \"name\": name, 
\"language\": language, \"display_name\": kernel_specs.display_name,", "the python kernel that matches the current env, or the", "\"Install one with 'python -m ipykernel install --name kernel_name [--user]'\"", "jupyter_client.kernelspec import find_kernel_specs, get_kernel_spec except ImportError as err: find_kernel_specs =", "kernel specifications for a given language\"\"\" import os import sys", "executable {}\\n\".format( sys.executable ) + \"Install one with 'python -m", "env, or the first kernel that matches the given language\"\"\"", "name, \"language\": language, \"display_name\": kernel_specs.display_name, } raise ValueError( \"No kernel", ".reraise import reraise try: # I prefer not to take", "os import sys from .languages import same_language from .reraise import", "get_kernel_spec except ImportError as err: find_kernel_specs = reraise(err) get_kernel_spec =", "import os import sys from .languages import same_language from .reraise", "language == \"python\": # Return the kernel that matches the", "reraise(err) get_kernel_spec = reraise(err) def set_kernelspec_from_language(notebook): \"\"\"Set the kernel specification", "language): return { \"name\": name, \"language\": language, \"display_name\": kernel_specs.display_name, }", "= notebook.metadata.get(\"jupytext\", {}).get(\"main_language\") if \"kernelspec\" not in notebook.metadata and language:", "the kernel specification based on the 'main_language' metadata\"\"\" language =", "the current python executable {}\\n\".format( sys.executable ) + \"Install one", "language\"\"\" import os import sys from .languages import same_language from", "{}\\n\".format( sys.executable ) + \"Install one with 'python -m ipykernel", "name in find_kernel_specs(): kernel_specs = get_kernel_spec(name) cmd = kernel_specs.argv[0] if", "for name in find_kernel_specs(): kernel_specs = get_kernel_spec(name) if same_language(kernel_specs.language, language):", "matches the current python executable {}\\n\".format( sys.executable ) + \"Install", "the first kernel that matches the given language\"\"\" if language", "kernel that matches the current Python executable for name in", "if ( kernel_specs.language == \"python\" and os.path.isfile(cmd) and os.path.samefile(cmd, sys.executable)", "if same_language(kernel_specs.language, language): return { \"name\": name, \"language\": language, \"display_name\":", "take a dependency on jupyter_client from jupyter_client.kernelspec import find_kernel_specs, get_kernel_spec", "language = notebook.metadata.get(\"jupytext\", {}).get(\"main_language\") if \"kernelspec\" not in notebook.metadata and", "the kernel that matches the current Python executable for name", "executable for name in find_kernel_specs(): kernel_specs = get_kernel_spec(name) cmd =", "from jupyter_client.kernelspec import find_kernel_specs, get_kernel_spec except ImportError as err: find_kernel_specs", "on the 'main_language' metadata\"\"\" language = notebook.metadata.get(\"jupytext\", {}).get(\"main_language\") if \"kernelspec\"", "a given language\"\"\" import os import sys from .languages import", "\"\"\"Set the kernel specification based on the 'main_language' metadata\"\"\" language", "kernel_name [--user]'\" ) for name in find_kernel_specs(): kernel_specs = get_kernel_spec(name)", "kernel_specs = get_kernel_spec(name) cmd = kernel_specs.argv[0] if ( kernel_specs.language ==", "[--user]'\" ) for name in find_kernel_specs(): kernel_specs = get_kernel_spec(name) if", "{ \"name\": name, \"language\": language, \"display_name\": 
kernel_specs.display_name, } raise ValueError(\"No", "in find_kernel_specs(): kernel_specs = get_kernel_spec(name) cmd = kernel_specs.argv[0] if (", "first kernel that matches the given language\"\"\" if language ==", "not to take a dependency on jupyter_client from jupyter_client.kernelspec import", "a dependency on jupyter_client from jupyter_client.kernelspec import find_kernel_specs, get_kernel_spec except", "set_kernelspec_from_language(notebook): \"\"\"Set the kernel specification based on the 'main_language' metadata\"\"\"", "== \"python\" and os.path.isfile(cmd) and os.path.samefile(cmd, sys.executable) ): return {", "and os.path.samefile(cmd, sys.executable) ): return { \"name\": name, \"language\": language,", "): return { \"name\": name, \"language\": language, \"display_name\": kernel_specs.display_name, }", "== \"python\": # Return the kernel that matches the current", "ipykernel install --name kernel_name [--user]'\" ) for name in find_kernel_specs():", "install --name kernel_name [--user]'\" ) for name in find_kernel_specs(): kernel_specs", "jupyter_client from jupyter_client.kernelspec import find_kernel_specs, get_kernel_spec except ImportError as err:", "dependency on jupyter_client from jupyter_client.kernelspec import find_kernel_specs, get_kernel_spec except ImportError", "on jupyter_client from jupyter_client.kernelspec import find_kernel_specs, get_kernel_spec except ImportError as", ".languages import same_language from .reraise import reraise try: # I", "\"kernelspec\" not in notebook.metadata and language: try: kernelspec = kernelspec_from_language(language)", "{}).get(\"main_language\") if \"kernelspec\" not in notebook.metadata and language: try: kernelspec", "sys.executable ) + \"Install one with 'python -m ipykernel install", "try: kernelspec = kernelspec_from_language(language) except ValueError: return notebook.metadata[\"kernelspec\"] = kernelspec", "kernel_specs.display_name, } raise ValueError( \"No kernel found that matches the", "find_kernel_specs(): kernel_specs = get_kernel_spec(name) cmd = kernel_specs.argv[0] if ( kernel_specs.language", "if \"kernelspec\" not in notebook.metadata and language: try: kernelspec =", "\"language\": language, \"display_name\": kernel_specs.display_name, } raise ValueError( \"No kernel found", "the current env, or the first kernel that matches the", "sys from .languages import same_language from .reraise import reraise try:", "not in notebook.metadata and language: try: kernelspec = kernelspec_from_language(language) except", "return { \"name\": name, \"language\": language, \"display_name\": kernel_specs.display_name, } raise", "Python executable for name in find_kernel_specs(): kernel_specs = get_kernel_spec(name) cmd", "from .reraise import reraise try: # I prefer not to", "= get_kernel_spec(name) cmd = kernel_specs.argv[0] if ( kernel_specs.language == \"python\"", "get_kernel_spec = reraise(err) def set_kernelspec_from_language(notebook): \"\"\"Set the kernel specification based", "the current Python executable for name in find_kernel_specs(): kernel_specs =", "matches the given language\"\"\" if language == \"python\": # Return", "--name kernel_name [--user]'\" ) for name in find_kernel_specs(): kernel_specs =", "python kernel that matches the current env, or the first", "with 'python -m ipykernel install --name kernel_name [--user]'\" ) for", "matches the current Python executable for name in find_kernel_specs(): kernel_specs", "import find_kernel_specs, get_kernel_spec except ImportError as err: find_kernel_specs 
= reraise(err)", "return notebook.metadata[\"kernelspec\"] = kernelspec notebook.metadata.get(\"jupytext\", {}).pop(\"main_language\") def kernelspec_from_language(language): \"\"\"Return the", "\"display_name\": kernel_specs.display_name, } raise ValueError(\"No kernel found for the language", "notebook.metadata and language: try: kernelspec = kernelspec_from_language(language) except ValueError: return" ]
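Assuming the two functions above are importable and at least one Python kernel is registered (for instance via 'python -m ipykernel install --user'), a round trip looks like the sketch below. nbformat is used here only to build a throwaway notebook object; it is not a dependency of the module itself, and the printed name and display name depend on the local installation.

# Minimal usage sketch for set_kernelspec_from_language.
import nbformat

nb = nbformat.v4.new_notebook()
nb.metadata["jupytext"] = {"main_language": "python"}

# Fills in metadata["kernelspec"] from 'main_language', then removes
# 'main_language' so the two pieces of metadata do not disagree later.
set_kernelspec_from_language(nb)

print(nb.metadata.get("kernelspec"))
# e.g. {'name': 'python3', 'language': 'python', 'display_name': 'Python 3'}
assert "main_language" not in nb.metadata.get("jupytext", {})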
[ "loaded. Returns ------- result : csc_matrix, csr_matrix, bsr_matrix, dia_matrix or", "will be loaded. Returns ------- result : csc_matrix, csr_matrix, bsr_matrix,", "cls((loaded['data'], loaded['indices'], loaded['indptr']), shape=loaded['shape']) elif matrix_format == 'dia': return cls((loaded['data'],", "sparse matrix of format {}.'.format(matrix.format)) arrays_dict.update( format=matrix.format.encode('ascii'), shape=matrix.shape, data=matrix.data )", "csc_matrix, csr_matrix, bsr_matrix, dia_matrix or coo_matrix A sparse matrix containing", "bool, optional Allow compressing the file. Default: True See Also", "sparse_matrix) >>> sparse_matrix = scipy.sparse.load_npz('/tmp/sparse_matrix.npz') >>> sparse_matrix <2x3 sparse matrix", "**PICKLE_KWARGS) as loaded: try: matrix_format = loaded['format'] except KeyError as", "csr_matrix, bsr_matrix, dia_matrix or coo_matrix A sparse matrix containing the", "except KeyError as e: raise ValueError('The file {} does not", "bsr_matrix, dia_matrix or coo_matrix A sparse matrix containing the loaded", "Parameters ---------- file : str or file-like object Either the", "may contain unicode or bytes. matrix_format = matrix_format.decode('ascii') try: cls", "matrix([[0, 0, 3], [4, 0, 0]], dtype=int64) >>> scipy.sparse.save_npz('/tmp/sparse_matrix.npz', sparse_matrix)", "PICKLE_KWARGS = dict(allow_pickle=False) def save_npz(file, matrix, compressed=True): \"\"\" Save a", "sparse matrix to a file using ``.npz`` format. Parameters ----------", "= matrix_format.item() if not isinstance(matrix_format, str): # Play safe with", "a sparse matrix from a file using ``.npz`` format. numpy.savez:", "= {} if matrix.format in ('csc', 'csr', 'bsr'): arrays_dict.update(indices=matrix.indices, indptr=matrix.indptr)", "the ``.npz`` extension will be appended to the file name", "coo``) The sparse matrix to save. compressed : bool, optional", "See Also -------- scipy.sparse.save_npz: Save a sparse matrix to a", "if not isinstance(matrix_format, str): # Play safe with Python 2", "input PICKLE_KWARGS = dict(allow_pickle=False) def save_npz(file, matrix, compressed=True): \"\"\" Save", "backward compatibility; # files saved with SciPy < 1.0.0 may", "See Also -------- scipy.sparse.load_npz: Load a sparse matrix from a", "numpy.savez_compressed : Save several arrays into a compressed ``.npz`` archive.", "optional Allow compressing the file. Default: True See Also --------", "format. numpy.load: Load several arrays from a ``.npz`` archive. Examples", ">>> sparse_matrix.todense() matrix([[0, 0, 3], [4, 0, 0]], dtype=int64) \"\"\"", "arrays_dict.update(row=matrix.row, col=matrix.col) else: raise NotImplementedError('Save is not implemented for sparse", "``csr``, ``bsr``, ``dia`` or coo``) The sparse matrix to save.", "{} if matrix.format in ('csc', 'csr', 'bsr'): arrays_dict.update(indices=matrix.indices, indptr=matrix.indptr) elif", "(format: ``csc``, ``csr``, ``bsr``, ``dia`` or coo``) The sparse matrix", "``.npz`` format. Parameters ---------- file : str or file-like object", "Save a sparse matrix to a file using ``.npz`` format.", "will be saved. If file is a string, the ``.npz``", "into a ``.npz`` archive. 
numpy.savez_compressed : Save several arrays into", "if matrix.format in ('csc', 'csr', 'bsr'): arrays_dict.update(indices=matrix.indices, indptr=matrix.indptr) elif matrix.format", "raise ValueError('The file {} does not contain a sparse matrix.'.format(file))", "arrays_dict.update( format=matrix.format.encode('ascii'), shape=matrix.shape, data=matrix.data ) if compressed: np.savez_compressed(file, **arrays_dict) else:", "object) where the data will be saved. If file is", "matrix from a file using ``.npz`` format. numpy.savez: Save several", "name (string) or an open file (file-like object) where the", "Raises ------ OSError If the input file does not exist", "return cls((loaded['data'], loaded['indices'], loaded['indptr']), shape=loaded['shape']) elif matrix_format == 'dia': return", "np import scipy.sparse __all__ = ['save_npz', 'load_npz'] # Make loading", "the input file does not exist or cannot be read.", "isinstance(matrix_format, str): # Play safe with Python 2 vs 3", "compressing the file. Default: True See Also -------- scipy.sparse.load_npz: Load", "unicode or bytes. matrix_format = matrix_format.decode('ascii') try: cls = getattr(scipy.sparse,", "**arrays_dict) else: np.savez(file, **arrays_dict) def load_npz(file): \"\"\" Load a sparse", "Store sparse matrix to disk, and load it again: >>>", "implemented for sparse matrix of format {}.'.format(matrix.format)) arrays_dict.update( format=matrix.format.encode('ascii'), shape=matrix.shape,", ">>> sparse_matrix <2x3 sparse matrix of type '<class 'numpy.int64'>' with", "loaded['format'] except KeyError as e: raise ValueError('The file {} does", "loaded['indptr']), shape=loaded['shape']) elif matrix_format == 'dia': return cls((loaded['data'], loaded['offsets']), shape=loaded['shape'])", "open file (file-like object) where the data will be saved.", "files saved with SciPy < 1.0.0 may contain unicode or", "= ['save_npz', 'load_npz'] # Make loading safe vs. malicious input", "Save several arrays into a ``.npz`` archive. numpy.savez_compressed : Save", "-------- scipy.sparse.save_npz: Save a sparse matrix to a file using", "(file-like object) where the data will be saved. If file", "sparse_matrix.todense() matrix([[0, 0, 3], [4, 0, 0]], dtype=int64) \"\"\" arrays_dict", "shape=loaded['shape']) else: raise NotImplementedError('Load is not implemented for ' 'sparse", ": str or file-like object Either the file name (string)", "malicious input PICKLE_KWARGS = dict(allow_pickle=False) def save_npz(file, matrix, compressed=True): \"\"\"", "or bytes. matrix_format = matrix_format.decode('ascii') try: cls = getattr(scipy.sparse, '{}_matrix'.format(matrix_format))", "matrix.format == 'coo': arrays_dict.update(row=matrix.row, col=matrix.col) else: raise NotImplementedError('Save is not", "the file name if it is not already there. matrix:", "``bsr``, ``dia`` or coo``) The sparse matrix to save. compressed", "Play safe with Python 2 vs 3 backward compatibility; #", "matrix_format == 'dia': return cls((loaded['data'], loaded['offsets']), shape=loaded['shape']) elif matrix_format ==", "['save_npz', 'load_npz'] # Make loading safe vs. malicious input PICKLE_KWARGS", "---------- file : str or file-like object Either the file", "the data will be loaded. 
Returns ------- result : csc_matrix,", "cls((loaded['data'], loaded['offsets']), shape=loaded['shape']) elif matrix_format == 'coo': return cls((loaded['data'], (loaded['row'],", "matrix: spmatrix (format: ``csc``, ``csr``, ``bsr``, ``dia`` or coo``) The", "Save several arrays into a compressed ``.npz`` archive. Examples --------", "== 'dia': arrays_dict.update(offsets=matrix.offsets) elif matrix.format == 'coo': arrays_dict.update(row=matrix.row, col=matrix.col) else:", "('csc', 'csr', 'bsr'): return cls((loaded['data'], loaded['indices'], loaded['indptr']), shape=loaded['shape']) elif matrix_format", "name if it is not already there. matrix: spmatrix (format:", "**arrays_dict) def load_npz(file): \"\"\" Load a sparse matrix from a", "bytes. matrix_format = matrix_format.decode('ascii') try: cls = getattr(scipy.sparse, '{}_matrix'.format(matrix_format)) except", "if it is not already there. matrix: spmatrix (format: ``csc``,", "0]])) >>> sparse_matrix <2x3 sparse matrix of type '<class 'numpy.int64'>'", "3], [4, 0, 0]], dtype=int64) \"\"\" arrays_dict = {} if", "save. compressed : bool, optional Allow compressing the file. Default:", "e: raise ValueError('The file {} does not contain a sparse", "``.npz`` archive. numpy.savez_compressed : Save several arrays into a compressed", "safe with Python 2 vs 3 backward compatibility; # files", "0, 0]])) >>> sparse_matrix <2x3 sparse matrix of type '<class", "be loaded. Returns ------- result : csc_matrix, csr_matrix, bsr_matrix, dia_matrix", "dtype=int64) >>> scipy.sparse.save_npz('/tmp/sparse_matrix.npz', sparse_matrix) >>> sparse_matrix = scipy.sparse.load_npz('/tmp/sparse_matrix.npz') >>> sparse_matrix", "a sparse matrix.'.format(file)) from e matrix_format = matrix_format.item() if not", "'bsr'): arrays_dict.update(indices=matrix.indices, indptr=matrix.indptr) elif matrix.format == 'dia': arrays_dict.update(offsets=matrix.offsets) elif matrix.format", "Also -------- scipy.sparse.save_npz: Save a sparse matrix to a file", "'<class 'numpy.int64'>' with 2 stored elements in Compressed Sparse Column", "== 'dia': return cls((loaded['data'], loaded['offsets']), shape=loaded['shape']) elif matrix_format == 'coo':", "sparse_matrix = scipy.sparse.load_npz('/tmp/sparse_matrix.npz') >>> sparse_matrix <2x3 sparse matrix of type", "it is not already there. matrix: spmatrix (format: ``csc``, ``csr``,", "matrix to a file using ``.npz`` format. Parameters ---------- file", "\"\"\" Save a sparse matrix to a file using ``.npz``", "with Python 2 vs 3 backward compatibility; # files saved", "the loaded data. Raises ------ OSError If the input file", "be appended to the file name if it is not", "matrix_format = matrix_format.decode('ascii') try: cls = getattr(scipy.sparse, '{}_matrix'.format(matrix_format)) except AttributeError", "NotImplementedError('Save is not implemented for sparse matrix of format {}.'.format(matrix.format))", "OSError If the input file does not exist or cannot", "matrix of type '<class 'numpy.int64'>' with 2 stored elements in", "= scipy.sparse.load_npz('/tmp/sparse_matrix.npz') >>> sparse_matrix <2x3 sparse matrix of type '<class", "loading safe vs. malicious input PICKLE_KWARGS = dict(allow_pickle=False) def save_npz(file,", "if compressed: np.savez_compressed(file, **arrays_dict) else: np.savez(file, **arrays_dict) def load_npz(file): \"\"\"", "dia_matrix or coo_matrix A sparse matrix containing the loaded data.", "is not already there. 
matrix: spmatrix (format: ``csc``, ``csr``, ``bsr``,", "\"{}\"'.format(matrix_format)) from e if matrix_format in ('csc', 'csr', 'bsr'): return", "archive. Examples -------- Store sparse matrix to disk, and load", "type '<class 'numpy.int64'>' with 2 stored elements in Compressed Sparse", "open file (file-like object) where the data will be loaded.", "loaded['indices'], loaded['indptr']), shape=loaded['shape']) elif matrix_format == 'dia': return cls((loaded['data'], loaded['offsets']),", "------ OSError If the input file does not exist or", "(file-like object) where the data will be loaded. Returns -------", "file name if it is not already there. matrix: spmatrix", "<2x3 sparse matrix of type '<class 'numpy.int64'>' with 2 stored", "in Compressed Sparse Column format> >>> sparse_matrix.todense() matrix([[0, 0, 3],", "a sparse matrix to a file using ``.npz`` format. numpy.load:", ": Save several arrays into a compressed ``.npz`` archive. Examples", "\"\"\" Load a sparse matrix from a file using ``.npz``", "matrix from a file using ``.npz`` format. Parameters ---------- file", "elif matrix.format == 'dia': arrays_dict.update(offsets=matrix.offsets) elif matrix.format == 'coo': arrays_dict.update(row=matrix.row,", "('csc', 'csr', 'bsr'): arrays_dict.update(indices=matrix.indices, indptr=matrix.indptr) elif matrix.format == 'dia': arrays_dict.update(offsets=matrix.offsets)", "'coo': arrays_dict.update(row=matrix.row, col=matrix.col) else: raise NotImplementedError('Save is not implemented for", "sparse matrix to disk, and load it again: >>> import", "where the data will be loaded. Returns ------- result :", "elif matrix.format == 'coo': arrays_dict.update(row=matrix.row, col=matrix.col) else: raise NotImplementedError('Save is", "matrix_format == 'coo': return cls((loaded['data'], (loaded['row'], loaded['col'])), shape=loaded['shape']) else: raise", "to a file using ``.npz`` format. Parameters ---------- file :", "as e: raise ValueError('The file {} does not contain a", "a file using ``.npz`` format. numpy.load: Load several arrays from", ">>> scipy.sparse.save_npz('/tmp/sparse_matrix.npz', sparse_matrix) >>> sparse_matrix = scipy.sparse.load_npz('/tmp/sparse_matrix.npz') >>> sparse_matrix <2x3", "loaded data. Raises ------ OSError If the input file does", "arrays_dict.update(offsets=matrix.offsets) elif matrix.format == 'coo': arrays_dict.update(row=matrix.row, col=matrix.col) else: raise NotImplementedError('Save", "scipy.sparse.load_npz('/tmp/sparse_matrix.npz') >>> sparse_matrix <2x3 sparse matrix of type '<class 'numpy.int64'>'", "into a compressed ``.npz`` archive. Examples -------- Store sparse matrix", "Sparse Column format> >>> sparse_matrix.todense() matrix([[0, 0, 3], [4, 0,", ">>> sparse_matrix = scipy.sparse.load_npz('/tmp/sparse_matrix.npz') >>> sparse_matrix <2x3 sparse matrix of", "import scipy.sparse __all__ = ['save_npz', 'load_npz'] # Make loading safe", "``.npz`` archive. Examples -------- Store sparse matrix to disk, and", "saved. If file is a string, the ``.npz`` extension will", "matrix to disk, and load it again: >>> import scipy.sparse", "sparse_matrix = scipy.sparse.csc_matrix(np.array([[0, 0, 3], [4, 0, 0]])) >>> sparse_matrix", "\"\"\" arrays_dict = {} if matrix.format in ('csc', 'csr', 'bsr'):", "format. Parameters ---------- file : str or file-like object Either", "from a file using ``.npz`` format. numpy.savez: Save several arrays", "# Play safe with Python 2 vs 3 backward compatibility;", "vs. 
malicious input PICKLE_KWARGS = dict(allow_pickle=False) def save_npz(file, matrix, compressed=True):", "and load it again: >>> import scipy.sparse >>> sparse_matrix =", "col=matrix.col) else: raise NotImplementedError('Save is not implemented for sparse matrix", "{} does not contain a sparse matrix.'.format(file)) from e matrix_format", "format> >>> sparse_matrix.todense() matrix([[0, 0, 3], [4, 0, 0]], dtype=int64)", "If the input file does not exist or cannot be", "0]], dtype=int64) >>> scipy.sparse.save_npz('/tmp/sparse_matrix.npz', sparse_matrix) >>> sparse_matrix = scipy.sparse.load_npz('/tmp/sparse_matrix.npz') >>>", "sparse matrix from a file using ``.npz`` format. numpy.savez: Save", "(string) or an open file (file-like object) where the data", "exist or cannot be read. See Also -------- scipy.sparse.save_npz: Save", "matrix_format in ('csc', 'csr', 'bsr'): return cls((loaded['data'], loaded['indices'], loaded['indptr']), shape=loaded['shape'])", "dict(allow_pickle=False) def save_npz(file, matrix, compressed=True): \"\"\" Save a sparse matrix", "0]], dtype=int64) \"\"\" arrays_dict = {} if matrix.format in ('csc',", "np.savez_compressed(file, **arrays_dict) else: np.savez(file, **arrays_dict) def load_npz(file): \"\"\" Load a", "Load several arrays from a ``.npz`` archive. Examples -------- Store", "format \"{}\"'.format(matrix_format)) from e if matrix_format in ('csc', 'csr', 'bsr'):", "3], [4, 0, 0]], dtype=int64) >>> scipy.sparse.save_npz('/tmp/sparse_matrix.npz', sparse_matrix) >>> sparse_matrix", "an open file (file-like object) where the data will be", "not contain a sparse matrix.'.format(file)) from e matrix_format = matrix_format.item()", "the file name (string) or an open file (file-like object)", "result : csc_matrix, csr_matrix, bsr_matrix, dia_matrix or coo_matrix A sparse", "ValueError('The file {} does not contain a sparse matrix.'.format(file)) from", "import numpy as np import scipy.sparse __all__ = ['save_npz', 'load_npz']", "in ('csc', 'csr', 'bsr'): return cls((loaded['data'], loaded['indices'], loaded['indptr']), shape=loaded['shape']) elif", "scipy.sparse >>> sparse_matrix = scipy.sparse.csc_matrix(np.array([[0, 0, 3], [4, 0, 0]]))", "str): # Play safe with Python 2 vs 3 backward", "def save_npz(file, matrix, compressed=True): \"\"\" Save a sparse matrix to", "a file using ``.npz`` format. Parameters ---------- file : str", "0]], dtype=int64) \"\"\" with np.load(file, **PICKLE_KWARGS) as loaded: try: matrix_format", "'dia': arrays_dict.update(offsets=matrix.offsets) elif matrix.format == 'coo': arrays_dict.update(row=matrix.row, col=matrix.col) else: raise", "compressed : bool, optional Allow compressing the file. Default: True", "'load_npz'] # Make loading safe vs. malicious input PICKLE_KWARGS =", "except AttributeError as e: raise ValueError('Unknown matrix format \"{}\"'.format(matrix_format)) from", "sparse_matrix.todense() matrix([[0, 0, 3], [4, 0, 0]], dtype=int64) >>> scipy.sparse.save_npz('/tmp/sparse_matrix.npz',", "matrix_format.decode('ascii') try: cls = getattr(scipy.sparse, '{}_matrix'.format(matrix_format)) except AttributeError as e:", "else: raise NotImplementedError('Save is not implemented for sparse matrix of", "``csc``, ``csr``, ``bsr``, ``dia`` or coo``) The sparse matrix to", "cannot be read. See Also -------- scipy.sparse.save_npz: Save a sparse", "for sparse matrix of format {}.'.format(matrix.format)) arrays_dict.update( format=matrix.format.encode('ascii'), shape=matrix.shape, data=matrix.data", "matrix to save. 
compressed : bool, optional Allow compressing the", "there. matrix: spmatrix (format: ``csc``, ``csr``, ``bsr``, ``dia`` or coo``)", "< 1.0.0 may contain unicode or bytes. matrix_format = matrix_format.decode('ascii')", "# Make loading safe vs. malicious input PICKLE_KWARGS = dict(allow_pickle=False)", "else: np.savez(file, **arrays_dict) def load_npz(file): \"\"\" Load a sparse matrix", "containing the loaded data. Raises ------ OSError If the input", "shape=loaded['shape']) elif matrix_format == 'dia': return cls((loaded['data'], loaded['offsets']), shape=loaded['shape']) elif", "from e matrix_format = matrix_format.item() if not isinstance(matrix_format, str): #", "saved with SciPy < 1.0.0 may contain unicode or bytes.", "arrays into a ``.npz`` archive. numpy.savez_compressed : Save several arrays", "or coo_matrix A sparse matrix containing the loaded data. Raises", "(loaded['row'], loaded['col'])), shape=loaded['shape']) else: raise NotImplementedError('Load is not implemented for", "file using ``.npz`` format. Parameters ---------- file : str or", "sparse_matrix <2x3 sparse matrix of type '<class 'numpy.int64'>' with 2", "read. See Also -------- scipy.sparse.save_npz: Save a sparse matrix to", "loaded['offsets']), shape=loaded['shape']) elif matrix_format == 'coo': return cls((loaded['data'], (loaded['row'], loaded['col'])),", "file {} does not contain a sparse matrix.'.format(file)) from e", "a ``.npz`` archive. numpy.savez_compressed : Save several arrays into a", ": bool, optional Allow compressing the file. Default: True See", "file using ``.npz`` format. numpy.savez: Save several arrays into a", "file (file-like object) where the data will be saved. If", "of format {}.'.format(matrix.format)) arrays_dict.update( format=matrix.format.encode('ascii'), shape=matrix.shape, data=matrix.data ) if compressed:", "shape=matrix.shape, data=matrix.data ) if compressed: np.savez_compressed(file, **arrays_dict) else: np.savez(file, **arrays_dict)", "using ``.npz`` format. numpy.load: Load several arrays from a ``.npz``", "matrix_format = matrix_format.item() if not isinstance(matrix_format, str): # Play safe", "'dia': return cls((loaded['data'], loaded['offsets']), shape=loaded['shape']) elif matrix_format == 'coo': return", "to disk, and load it again: >>> import scipy.sparse >>>", "from a file using ``.npz`` format. Parameters ---------- file :", ">>> import scipy.sparse >>> sparse_matrix = scipy.sparse.csc_matrix(np.array([[0, 0, 3], [4,", "spmatrix (format: ``csc``, ``csr``, ``bsr``, ``dia`` or coo``) The sparse", "format=matrix.format.encode('ascii'), shape=matrix.shape, data=matrix.data ) if compressed: np.savez_compressed(file, **arrays_dict) else: np.savez(file,", "``.npz`` extension will be appended to the file name if", "Load a sparse matrix from a file using ``.npz`` format.", "file. 
Default: True See Also -------- scipy.sparse.load_npz: Load a sparse", "= loaded['format'] except KeyError as e: raise ValueError('The file {}", "ValueError('Unknown matrix format \"{}\"'.format(matrix_format)) from e if matrix_format in ('csc',", "elif matrix_format == 'coo': return cls((loaded['data'], (loaded['row'], loaded['col'])), shape=loaded['shape']) else:", "file name (string) or an open file (file-like object) where", "vs 3 backward compatibility; # files saved with SciPy <", "[4, 0, 0]], dtype=int64) \"\"\" arrays_dict = {} if matrix.format", "string, the ``.npz`` extension will be appended to the file", "If file is a string, the ``.npz`` extension will be", "a sparse matrix from a file using ``.npz`` format. Parameters", "0, 3], [4, 0, 0]], dtype=int64) \"\"\" with np.load(file, **PICKLE_KWARGS)", "sparse_matrix.todense() matrix([[0, 0, 3], [4, 0, 0]], dtype=int64) \"\"\" with", "not isinstance(matrix_format, str): # Play safe with Python 2 vs", "not exist or cannot be read. See Also -------- scipy.sparse.save_npz:", "data will be saved. If file is a string, the", "matrix_format.item() if not isinstance(matrix_format, str): # Play safe with Python", "not already there. matrix: spmatrix (format: ``csc``, ``csr``, ``bsr``, ``dia``", "or coo``) The sparse matrix to save. compressed : bool,", "load_npz(file): \"\"\" Load a sparse matrix from a file using", "A sparse matrix containing the loaded data. Raises ------ OSError", "scipy.sparse.save_npz('/tmp/sparse_matrix.npz', sparse_matrix) >>> sparse_matrix = scipy.sparse.load_npz('/tmp/sparse_matrix.npz') >>> sparse_matrix <2x3 sparse", "save_npz(file, matrix, compressed=True): \"\"\" Save a sparse matrix to a", "of type '<class 'numpy.int64'>' with 2 stored elements in Compressed", "data. Raises ------ OSError If the input file does not", "several arrays from a ``.npz`` archive. Examples -------- Store sparse", "arrays_dict = {} if matrix.format in ('csc', 'csr', 'bsr'): arrays_dict.update(indices=matrix.indices,", "with 2 stored elements in Compressed Sparse Column format> >>>", "3 backward compatibility; # files saved with SciPy < 1.0.0", "dtype=int64) \"\"\" with np.load(file, **PICKLE_KWARGS) as loaded: try: matrix_format =", "try: cls = getattr(scipy.sparse, '{}_matrix'.format(matrix_format)) except AttributeError as e: raise", "2 vs 3 backward compatibility; # files saved with SciPy", "sparse matrix containing the loaded data. Raises ------ OSError If", "'{}_matrix'.format(matrix_format)) except AttributeError as e: raise ValueError('Unknown matrix format \"{}\"'.format(matrix_format))", "AttributeError as e: raise ValueError('Unknown matrix format \"{}\"'.format(matrix_format)) from e", "the data will be saved. If file is a string,", "matrix.'.format(file)) from e matrix_format = matrix_format.item() if not isinstance(matrix_format, str):", "sparse matrix of type '<class 'numpy.int64'>' with 2 stored elements", ") if compressed: np.savez_compressed(file, **arrays_dict) else: np.savez(file, **arrays_dict) def load_npz(file):", "disk, and load it again: >>> import scipy.sparse >>> sparse_matrix", "NotImplementedError('Load is not implemented for ' 'sparse matrix of format", "safe vs. 
malicious input PICKLE_KWARGS = dict(allow_pickle=False) def save_npz(file, matrix,", "elements in Compressed Sparse Column format> >>> sparse_matrix.todense() matrix([[0, 0,", "'bsr'): return cls((loaded['data'], loaded['indices'], loaded['indptr']), shape=loaded['shape']) elif matrix_format == 'dia':", "else: raise NotImplementedError('Load is not implemented for ' 'sparse matrix", "object Either the file name (string) or an open file", "several arrays into a ``.npz`` archive. numpy.savez_compressed : Save several", "Also -------- scipy.sparse.load_npz: Load a sparse matrix from a file", "compatibility; # files saved with SciPy < 1.0.0 may contain", "getattr(scipy.sparse, '{}_matrix'.format(matrix_format)) except AttributeError as e: raise ValueError('Unknown matrix format", "def load_npz(file): \"\"\" Load a sparse matrix from a file", "with np.load(file, **PICKLE_KWARGS) as loaded: try: matrix_format = loaded['format'] except", "1.0.0 may contain unicode or bytes. matrix_format = matrix_format.decode('ascii') try:", "Python 2 vs 3 backward compatibility; # files saved with", "matrix.format in ('csc', 'csr', 'bsr'): arrays_dict.update(indices=matrix.indices, indptr=matrix.indptr) elif matrix.format ==", "file does not exist or cannot be read. See Also", "\"\"\" with np.load(file, **PICKLE_KWARGS) as loaded: try: matrix_format = loaded['format']", "file : str or file-like object Either the file name", "Allow compressing the file. Default: True See Also -------- scipy.sparse.load_npz:", "be read. See Also -------- scipy.sparse.save_npz: Save a sparse matrix", "matrix.format == 'dia': arrays_dict.update(offsets=matrix.offsets) elif matrix.format == 'coo': arrays_dict.update(row=matrix.row, col=matrix.col)", "Column format> >>> sparse_matrix.todense() matrix([[0, 0, 3], [4, 0, 0]],", "shape=loaded['shape']) elif matrix_format == 'coo': return cls((loaded['data'], (loaded['row'], loaded['col'])), shape=loaded['shape'])", "matrix([[0, 0, 3], [4, 0, 0]], dtype=int64) \"\"\" arrays_dict =", "-------- Store sparse matrix to disk, and load it again:", "scipy.sparse.csc_matrix(np.array([[0, 0, 3], [4, 0, 0]])) >>> sparse_matrix <2x3 sparse", "will be appended to the file name if it is", "to a file using ``.npz`` format. numpy.load: Load several arrays", "format. numpy.savez: Save several arrays into a ``.npz`` archive. numpy.savez_compressed", "'numpy.int64'>' with 2 stored elements in Compressed Sparse Column format>", "the file. Default: True See Also -------- scipy.sparse.load_npz: Load a", "file using ``.npz`` format. numpy.load: Load several arrays from a", "raise ValueError('Unknown matrix format \"{}\"'.format(matrix_format)) from e if matrix_format in", "numpy as np import scipy.sparse __all__ = ['save_npz', 'load_npz'] #", "[4, 0, 0]], dtype=int64) \"\"\" with np.load(file, **PICKLE_KWARGS) as loaded:", "dtype=int64) \"\"\" arrays_dict = {} if matrix.format in ('csc', 'csr',", "cls = getattr(scipy.sparse, '{}_matrix'.format(matrix_format)) except AttributeError as e: raise ValueError('Unknown", "it again: >>> import scipy.sparse >>> sparse_matrix = scipy.sparse.csc_matrix(np.array([[0, 0,", "scipy.sparse __all__ = ['save_npz', 'load_npz'] # Make loading safe vs.", "a file using ``.npz`` format. numpy.savez: Save several arrays into", "np.load(file, **PICKLE_KWARGS) as loaded: try: matrix_format = loaded['format'] except KeyError", "matrix([[0, 0, 3], [4, 0, 0]], dtype=int64) \"\"\" with np.load(file,", "sparse matrix to save. 
compressed : bool, optional Allow compressing", "is not implemented for sparse matrix of format {}.'.format(matrix.format)) arrays_dict.update(", "# files saved with SciPy < 1.0.0 may contain unicode", "cls((loaded['data'], (loaded['row'], loaded['col'])), shape=loaded['shape']) else: raise NotImplementedError('Load is not implemented", "0, 3], [4, 0, 0]])) >>> sparse_matrix <2x3 sparse matrix", "elif matrix_format == 'dia': return cls((loaded['data'], loaded['offsets']), shape=loaded['shape']) elif matrix_format", "contain unicode or bytes. matrix_format = matrix_format.decode('ascii') try: cls =", "a sparse matrix to a file using ``.npz`` format. Parameters", "loaded: try: matrix_format = loaded['format'] except KeyError as e: raise", "is a string, the ``.npz`` extension will be appended to", "scipy.sparse.save_npz: Save a sparse matrix to a file using ``.npz``", "numpy.savez: Save several arrays into a ``.npz`` archive. numpy.savez_compressed :", "matrix format \"{}\"'.format(matrix_format)) from e if matrix_format in ('csc', 'csr',", "Make loading safe vs. malicious input PICKLE_KWARGS = dict(allow_pickle=False) def", "'coo': return cls((loaded['data'], (loaded['row'], loaded['col'])), shape=loaded['shape']) else: raise NotImplementedError('Load is", "file-like object Either the file name (string) or an open", "= dict(allow_pickle=False) def save_npz(file, matrix, compressed=True): \"\"\" Save a sparse", "several arrays into a compressed ``.npz`` archive. Examples -------- Store", "str or file-like object Either the file name (string) or", "Default: True See Also -------- scipy.sparse.load_npz: Load a sparse matrix", "using ``.npz`` format. numpy.savez: Save several arrays into a ``.npz``", "return cls((loaded['data'], loaded['offsets']), shape=loaded['shape']) elif matrix_format == 'coo': return cls((loaded['data'],", "contain a sparse matrix.'.format(file)) from e matrix_format = matrix_format.item() if", "in ('csc', 'csr', 'bsr'): arrays_dict.update(indices=matrix.indices, indptr=matrix.indptr) elif matrix.format == 'dia':", "a ``.npz`` archive. Examples -------- Store sparse matrix to disk,", "with SciPy < 1.0.0 may contain unicode or bytes. matrix_format", "be saved. If file is a string, the ``.npz`` extension", "data will be loaded. Returns ------- result : csc_matrix, csr_matrix,", "``dia`` or coo``) The sparse matrix to save. compressed :", "stored elements in Compressed Sparse Column format> >>> sparse_matrix.todense() matrix([[0,", "where the data will be saved. If file is a", "extension will be appended to the file name if it", "True See Also -------- scipy.sparse.load_npz: Load a sparse matrix from", "if matrix_format in ('csc', 'csr', 'bsr'): return cls((loaded['data'], loaded['indices'], loaded['indptr']),", "2 stored elements in Compressed Sparse Column format> >>> sparse_matrix.todense()", "raise NotImplementedError('Save is not implemented for sparse matrix of format", "Examples -------- Store sparse matrix to disk, and load it", ">>> sparse_matrix.todense() matrix([[0, 0, 3], [4, 0, 0]], dtype=int64) >>>", "sparse matrix to a file using ``.npz`` format. numpy.load: Load", "numpy.load: Load several arrays from a ``.npz`` archive. Examples --------", "matrix_format = loaded['format'] except KeyError as e: raise ValueError('The file", "format {}.'.format(matrix.format)) arrays_dict.update( format=matrix.format.encode('ascii'), shape=matrix.shape, data=matrix.data ) if compressed: np.savez_compressed(file,", "arrays from a ``.npz`` archive. 
Examples -------- Store sparse matrix", "[4, 0, 0]])) >>> sparse_matrix <2x3 sparse matrix of type", "does not contain a sparse matrix.'.format(file)) from e matrix_format =", "not implemented for sparse matrix of format {}.'.format(matrix.format)) arrays_dict.update( format=matrix.format.encode('ascii'),", "using ``.npz`` format. Parameters ---------- file : str or file-like", "``.npz`` format. numpy.load: Load several arrays from a ``.npz`` archive.", "0, 0]], dtype=int64) \"\"\" with np.load(file, **PICKLE_KWARGS) as loaded: try:", "does not exist or cannot be read. See Also --------", "as e: raise ValueError('Unknown matrix format \"{}\"'.format(matrix_format)) from e if", "raise NotImplementedError('Load is not implemented for ' 'sparse matrix of", "0, 3], [4, 0, 0]], dtype=int64) >>> scipy.sparse.save_npz('/tmp/sparse_matrix.npz', sparse_matrix) >>>", "from e if matrix_format in ('csc', 'csr', 'bsr'): return cls((loaded['data'],", "return cls((loaded['data'], (loaded['row'], loaded['col'])), shape=loaded['shape']) else: raise NotImplementedError('Load is not", "__all__ = ['save_npz', 'load_npz'] # Make loading safe vs. malicious", "compressed=True): \"\"\" Save a sparse matrix to a file using", "------- result : csc_matrix, csr_matrix, bsr_matrix, dia_matrix or coo_matrix A", "-------- scipy.sparse.load_npz: Load a sparse matrix from a file using", "= matrix_format.decode('ascii') try: cls = getattr(scipy.sparse, '{}_matrix'.format(matrix_format)) except AttributeError as", "np.savez(file, **arrays_dict) def load_npz(file): \"\"\" Load a sparse matrix from", "KeyError as e: raise ValueError('The file {} does not contain", ">>> sparse_matrix = scipy.sparse.csc_matrix(np.array([[0, 0, 3], [4, 0, 0]])) >>>", "{}.'.format(matrix.format)) arrays_dict.update( format=matrix.format.encode('ascii'), shape=matrix.shape, data=matrix.data ) if compressed: np.savez_compressed(file, **arrays_dict)", "or an open file (file-like object) where the data will", "compressed: np.savez_compressed(file, **arrays_dict) else: np.savez(file, **arrays_dict) def load_npz(file): \"\"\" Load", "matrix of format {}.'.format(matrix.format)) arrays_dict.update( format=matrix.format.encode('ascii'), shape=matrix.shape, data=matrix.data ) if", "coo_matrix A sparse matrix containing the loaded data. Raises ------", "The sparse matrix to save. compressed : bool, optional Allow", "arrays_dict.update(indices=matrix.indices, indptr=matrix.indptr) elif matrix.format == 'dia': arrays_dict.update(offsets=matrix.offsets) elif matrix.format ==", "to save. compressed : bool, optional Allow compressing the file.", "again: >>> import scipy.sparse >>> sparse_matrix = scipy.sparse.csc_matrix(np.array([[0, 0, 3],", "try: matrix_format = loaded['format'] except KeyError as e: raise ValueError('The", "or cannot be read. See Also -------- scipy.sparse.save_npz: Save a", "sparse matrix from a file using ``.npz`` format. Parameters ----------", "file is a string, the ``.npz`` extension will be appended", "to the file name if it is not already there.", "'csr', 'bsr'): arrays_dict.update(indices=matrix.indices, indptr=matrix.indptr) elif matrix.format == 'dia': arrays_dict.update(offsets=matrix.offsets) elif", "== 'coo': arrays_dict.update(row=matrix.row, col=matrix.col) else: raise NotImplementedError('Save is not implemented", "already there. 
matrix: spmatrix (format: ``csc``, ``csr``, ``bsr``, ``dia`` or", "a string, the ``.npz`` extension will be appended to the", "sparse matrix.'.format(file)) from e matrix_format = matrix_format.item() if not isinstance(matrix_format,", "arrays into a compressed ``.npz`` archive. Examples -------- Store sparse", "== 'coo': return cls((loaded['data'], (loaded['row'], loaded['col'])), shape=loaded['shape']) else: raise NotImplementedError('Load", "'csr', 'bsr'): return cls((loaded['data'], loaded['indices'], loaded['indptr']), shape=loaded['shape']) elif matrix_format ==", "as loaded: try: matrix_format = loaded['format'] except KeyError as e:", "= getattr(scipy.sparse, '{}_matrix'.format(matrix_format)) except AttributeError as e: raise ValueError('Unknown matrix", "scipy.sparse.load_npz: Load a sparse matrix from a file using ``.npz``", "``.npz`` format. numpy.savez: Save several arrays into a ``.npz`` archive.", "[4, 0, 0]], dtype=int64) >>> scipy.sparse.save_npz('/tmp/sparse_matrix.npz', sparse_matrix) >>> sparse_matrix =", "data=matrix.data ) if compressed: np.savez_compressed(file, **arrays_dict) else: np.savez(file, **arrays_dict) def", "0, 3], [4, 0, 0]], dtype=int64) \"\"\" arrays_dict = {}", "Either the file name (string) or an open file (file-like", "matrix to a file using ``.npz`` format. numpy.load: Load several", "3], [4, 0, 0]])) >>> sparse_matrix <2x3 sparse matrix of", "0, 0]], dtype=int64) >>> scipy.sparse.save_npz('/tmp/sparse_matrix.npz', sparse_matrix) >>> sparse_matrix = scipy.sparse.load_npz('/tmp/sparse_matrix.npz')", "from a ``.npz`` archive. Examples -------- Store sparse matrix to", "3], [4, 0, 0]], dtype=int64) \"\"\" with np.load(file, **PICKLE_KWARGS) as", "appended to the file name if it is not already", "load it again: >>> import scipy.sparse >>> sparse_matrix = scipy.sparse.csc_matrix(np.array([[0,", "a compressed ``.npz`` archive. Examples -------- Store sparse matrix to", "e: raise ValueError('Unknown matrix format \"{}\"'.format(matrix_format)) from e if matrix_format", "is not implemented for ' 'sparse matrix of format {}.'.format(matrix_format))", "import scipy.sparse >>> sparse_matrix = scipy.sparse.csc_matrix(np.array([[0, 0, 3], [4, 0,", "e matrix_format = matrix_format.item() if not isinstance(matrix_format, str): # Play", "file (file-like object) where the data will be loaded. Returns", "compressed ``.npz`` archive. Examples -------- Store sparse matrix to disk,", "loaded['col'])), shape=loaded['shape']) else: raise NotImplementedError('Load is not implemented for '", "e if matrix_format in ('csc', 'csr', 'bsr'): return cls((loaded['data'], loaded['indices'],", "matrix, compressed=True): \"\"\" Save a sparse matrix to a file", "archive. numpy.savez_compressed : Save several arrays into a compressed ``.npz``", "SciPy < 1.0.0 may contain unicode or bytes. matrix_format =", "as np import scipy.sparse __all__ = ['save_npz', 'load_npz'] # Make", "0, 0]], dtype=int64) \"\"\" arrays_dict = {} if matrix.format in", "object) where the data will be loaded. Returns ------- result", "input file does not exist or cannot be read. See", ": csc_matrix, csr_matrix, bsr_matrix, dia_matrix or coo_matrix A sparse matrix", "Returns ------- result : csc_matrix, csr_matrix, bsr_matrix, dia_matrix or coo_matrix", "or file-like object Either the file name (string) or an", "matrix containing the loaded data. 
Raises ------ OSError If the", "= scipy.sparse.csc_matrix(np.array([[0, 0, 3], [4, 0, 0]])) >>> sparse_matrix <2x3", "Compressed Sparse Column format> >>> sparse_matrix.todense() matrix([[0, 0, 3], [4,", "indptr=matrix.indptr) elif matrix.format == 'dia': arrays_dict.update(offsets=matrix.offsets) elif matrix.format == 'coo':" ]
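As a complement to the docstring examples, the short sketch below (an illustration, not part of the module) peeks inside the archive that save_npz writes: each matrix attribute becomes one array in a plain .npz file, alongside the 'format' and 'shape' entries that load_npz uses to pick the right matrix class.

# What a saved CSR matrix looks like inside the .npz archive.
import numpy as np
import scipy.sparse

m = scipy.sparse.csr_matrix(np.array([[0, 0, 3], [4, 0, 0]]))
scipy.sparse.save_npz('/tmp/example_csr.npz', m)

with np.load('/tmp/example_csr.npz') as archive:
    print(sorted(archive.files))
    # ['data', 'format', 'indices', 'indptr', 'shape']
    print(archive['format'])  # b'csr' (stored ASCII-encoded, decoded on load)

m2 = scipy.sparse.load_npz('/tmp/example_csr.npz')
assert (m != m2).nnz == 0  # the round trip preserves the matrix exactly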
[ "-> None: self.save(str(self._get_next_index()), obj) self._increment_index() def load_all(self, max_els: int =", "if create: metadata: Dict[str, any] = { \"next_index\": 0, }", "Dict[str, any]) -> None: super().save(\"metadata\", metadata) def _get_metadata(self) -> Dict[str,", "Atlas(Directory): def __init__(self, services: Services, name: str, parent: str, create:", "idx: int = 0 while idx < max_els: obj: any", "ret: List[any] = [] idx: int = 0 while idx", "None: super().__init__(services, name, parent, create) if create: metadata: Dict[str, any]", "Dict[str, any] = self._get_metadata() return metadata[\"next_index\"] def _increment_index(self) -> None:", "List[any]: ret: List[any] = [] idx: int = 0 while", "def _get_next_index(self) -> int: metadata: Dict[str, any] = self._get_metadata() return", "[] idx: int = 0 while idx < max_els: obj:", "} self._save_metadata(metadata) def append(self, obj: any) -> None: self.save(str(self._get_next_index()), obj)", "def _increment_index(self) -> None: metadata: Dict[str, any] = self._get_metadata() metadata[\"next_index\"]", "= self.load(str(idx)) if obj: ret.append(obj) idx += 1 else: break", "Dict[str, any] = self._get_metadata() metadata[\"next_index\"] += 1 self._save_metadata(metadata) def _save_metadata(self,", "import Dict, List from simulator.services.resources.directory import Directory from simulator.services.services import", "obj: any = self.load(str(idx)) if obj: ret.append(obj) idx += 1", "\"next_index\": 0, } self._save_metadata(metadata) def append(self, obj: any) -> None:", "ret.append(obj) idx += 1 else: break return ret def _get_next_index(self)", "metadata[\"next_index\"] def _increment_index(self) -> None: metadata: Dict[str, any] = self._get_metadata()", "else: break return ret def _get_next_index(self) -> int: metadata: Dict[str,", "from typing import Dict, List from simulator.services.resources.directory import Directory from", "Dict[str, any] = { \"next_index\": 0, } self._save_metadata(metadata) def append(self,", "parent, create) if create: metadata: Dict[str, any] = { \"next_index\":", "bool = False) -> None: super().__init__(services, name, parent, create) if", "any] = { \"next_index\": 0, } self._save_metadata(metadata) def append(self, obj:", "return metadata[\"next_index\"] def _increment_index(self) -> None: metadata: Dict[str, any] =", "create: metadata: Dict[str, any] = { \"next_index\": 0, } self._save_metadata(metadata)", "+= 1 self._save_metadata(metadata) def _save_metadata(self, metadata: Dict[str, any]) -> None:", "idx < max_els: obj: any = self.load(str(idx)) if obj: ret.append(obj)", "from simulator.services.services import Services class Atlas(Directory): def __init__(self, services: Services,", "create: bool = False) -> None: super().__init__(services, name, parent, create)", "self._save_metadata(metadata) def _save_metadata(self, metadata: Dict[str, any]) -> None: super().save(\"metadata\", metadata)", "self._save_metadata(metadata) def append(self, obj: any) -> None: self.save(str(self._get_next_index()), obj) self._increment_index()", "Services class Atlas(Directory): def __init__(self, services: Services, name: str, parent:", "class Atlas(Directory): def __init__(self, services: Services, name: str, parent: str,", "any]) -> None: super().save(\"metadata\", metadata) def _get_metadata(self) -> Dict[str, any]:", "import Directory from simulator.services.services import Services class Atlas(Directory): def __init__(self,", "-> None: metadata: Dict[str, any] = self._get_metadata() 
metadata[\"next_index\"] += 1", "def load_all(self, max_els: int = float(\"inf\")) -> List[any]: ret: List[any]", "super().__init__(services, name, parent, create) if create: metadata: Dict[str, any] =", "obj: any) -> None: self.save(str(self._get_next_index()), obj) self._increment_index() def load_all(self, max_els:", "return ret def _get_next_index(self) -> int: metadata: Dict[str, any] =", "break return ret def _get_next_index(self) -> int: metadata: Dict[str, any]", "append(self, obj: any) -> None: self.save(str(self._get_next_index()), obj) self._increment_index() def load_all(self,", "ret def _get_next_index(self) -> int: metadata: Dict[str, any] = self._get_metadata()", "1 self._save_metadata(metadata) def _save_metadata(self, metadata: Dict[str, any]) -> None: super().save(\"metadata\",", "simulator.services.resources.directory import Directory from simulator.services.services import Services class Atlas(Directory): def", "name: str, parent: str, create: bool = False) -> None:", "any) -> None: self.save(str(self._get_next_index()), obj) self._increment_index() def load_all(self, max_els: int", "any] = self._get_metadata() metadata[\"next_index\"] += 1 self._save_metadata(metadata) def _save_metadata(self, metadata:", "str, create: bool = False) -> None: super().__init__(services, name, parent,", "None: metadata: Dict[str, any] = self._get_metadata() metadata[\"next_index\"] += 1 self._save_metadata(metadata)", "name, parent, create) if create: metadata: Dict[str, any] = {", "if obj: ret.append(obj) idx += 1 else: break return ret", "int = 0 while idx < max_els: obj: any =", "_increment_index(self) -> None: metadata: Dict[str, any] = self._get_metadata() metadata[\"next_index\"] +=", "obj: ret.append(obj) idx += 1 else: break return ret def", "_save_metadata(self, metadata: Dict[str, any]) -> None: super().save(\"metadata\", metadata) def _get_metadata(self)", "idx += 1 else: break return ret def _get_next_index(self) ->", "metadata: Dict[str, any] = self._get_metadata() return metadata[\"next_index\"] def _increment_index(self) ->", "from simulator.services.resources.directory import Directory from simulator.services.services import Services class Atlas(Directory):", "int: metadata: Dict[str, any] = self._get_metadata() return metadata[\"next_index\"] def _increment_index(self)", "None: self.save(str(self._get_next_index()), obj) self._increment_index() def load_all(self, max_els: int = float(\"inf\"))", "while idx < max_els: obj: any = self.load(str(idx)) if obj:", "0, } self._save_metadata(metadata) def append(self, obj: any) -> None: self.save(str(self._get_next_index()),", "def __init__(self, services: Services, name: str, parent: str, create: bool", "def append(self, obj: any) -> None: self.save(str(self._get_next_index()), obj) self._increment_index() def", "metadata: Dict[str, any]) -> None: super().save(\"metadata\", metadata) def _get_metadata(self) ->", "load_all(self, max_els: int = float(\"inf\")) -> List[any]: ret: List[any] =", "= self._get_metadata() return metadata[\"next_index\"] def _increment_index(self) -> None: metadata: Dict[str,", "= { \"next_index\": 0, } self._save_metadata(metadata) def append(self, obj: any)", "+= 1 else: break return ret def _get_next_index(self) -> int:", "Dict, List from simulator.services.resources.directory import Directory from simulator.services.services import Services", "parent: str, create: bool = False) -> None: super().__init__(services, name,", "1 else: break return ret def _get_next_index(self) -> int: metadata:", "< 
max_els: obj: any = self.load(str(idx)) if obj: ret.append(obj) idx", "self.load(str(idx)) if obj: ret.append(obj) idx += 1 else: break return", "_get_next_index(self) -> int: metadata: Dict[str, any] = self._get_metadata() return metadata[\"next_index\"]", "-> None: super().save(\"metadata\", metadata) def _get_metadata(self) -> Dict[str, any]: return", "Directory from simulator.services.services import Services class Atlas(Directory): def __init__(self, services:", "-> List[any]: ret: List[any] = [] idx: int = 0", "typing import Dict, List from simulator.services.resources.directory import Directory from simulator.services.services", "= float(\"inf\")) -> List[any]: ret: List[any] = [] idx: int", "metadata[\"next_index\"] += 1 self._save_metadata(metadata) def _save_metadata(self, metadata: Dict[str, any]) ->", "create) if create: metadata: Dict[str, any] = { \"next_index\": 0,", "__init__(self, services: Services, name: str, parent: str, create: bool =", "False) -> None: super().__init__(services, name, parent, create) if create: metadata:", "List from simulator.services.resources.directory import Directory from simulator.services.services import Services class", "{ \"next_index\": 0, } self._save_metadata(metadata) def append(self, obj: any) ->", "= self._get_metadata() metadata[\"next_index\"] += 1 self._save_metadata(metadata) def _save_metadata(self, metadata: Dict[str,", "= 0 while idx < max_els: obj: any = self.load(str(idx))", "services: Services, name: str, parent: str, create: bool = False)", "= False) -> None: super().__init__(services, name, parent, create) if create:", "0 while idx < max_els: obj: any = self.load(str(idx)) if", "-> int: metadata: Dict[str, any] = self._get_metadata() return metadata[\"next_index\"] def", "= [] idx: int = 0 while idx < max_els:", "str, parent: str, create: bool = False) -> None: super().__init__(services,", "float(\"inf\")) -> List[any]: ret: List[any] = [] idx: int =", "any = self.load(str(idx)) if obj: ret.append(obj) idx += 1 else:", "max_els: int = float(\"inf\")) -> List[any]: ret: List[any] = []", "int = float(\"inf\")) -> List[any]: ret: List[any] = [] idx:", "List[any] = [] idx: int = 0 while idx <", "metadata: Dict[str, any] = self._get_metadata() metadata[\"next_index\"] += 1 self._save_metadata(metadata) def", "self.save(str(self._get_next_index()), obj) self._increment_index() def load_all(self, max_els: int = float(\"inf\")) ->", "self._increment_index() def load_all(self, max_els: int = float(\"inf\")) -> List[any]: ret:", "self._get_metadata() metadata[\"next_index\"] += 1 self._save_metadata(metadata) def _save_metadata(self, metadata: Dict[str, any])", "self._get_metadata() return metadata[\"next_index\"] def _increment_index(self) -> None: metadata: Dict[str, any]", "def _save_metadata(self, metadata: Dict[str, any]) -> None: super().save(\"metadata\", metadata) def", "-> None: super().__init__(services, name, parent, create) if create: metadata: Dict[str,", "simulator.services.services import Services class Atlas(Directory): def __init__(self, services: Services, name:", "any] = self._get_metadata() return metadata[\"next_index\"] def _increment_index(self) -> None: metadata:", "None: super().save(\"metadata\", metadata) def _get_metadata(self) -> Dict[str, any]: return super().load(\"metadata\")", "import Services class Atlas(Directory): def __init__(self, services: Services, name: str,", "Services, name: str, parent: str, create: bool = False) ->", "max_els: obj: any = self.load(str(idx)) if obj: ret.append(obj) idx +=", 
"metadata: Dict[str, any] = { \"next_index\": 0, } self._save_metadata(metadata) def", "obj) self._increment_index() def load_all(self, max_els: int = float(\"inf\")) -> List[any]:" ]
[ "compliance with the License. # You may obtain a copy", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "2.0 (the \"License\"); # you may not use this file", "agreed to in writing, software # distributed under the License", "file except in compliance with the License. # You may", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "Unless required by applicable law or agreed to in writing,", "builder \"\"\" from metadata.generated.schema.api.tests.createTableTest import CreateTableTestRequest from metadata.generated.schema.tests.table import tableRowCountToEqual", "CreateTableTestRequest: \"\"\"Specific test builder for the test\"\"\" return self.build_test_request( config=tableRowCountToEqual.TableRowCountToEqual(", "distributed under the License is distributed on an \"AS IS\"", "2022 Collate # Licensed under the Apache License, Version 2.0", "\"\"\" from metadata.generated.schema.api.tests.createTableTest import CreateTableTestRequest from metadata.generated.schema.tests.table import tableRowCountToEqual from", "the specific language governing permissions and # limitations under the", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "for `expect_table_row_count_to_equal` GE expectation\"\"\" def _build_test(self) -> CreateTableTestRequest: \"\"\"Specific test", "under the License. \"\"\" TestCase builder \"\"\" from metadata.generated.schema.api.tests.createTableTest import", ") class TableRowCountToEqualBuilder(BaseTableTestBuilder): \"\"\"Builder for `expect_table_row_count_to_equal` GE expectation\"\"\" def _build_test(self)", "express or implied. # See the License for the specific", "applicable law or agreed to in writing, software # distributed", "except in compliance with the License. # You may obtain", "TableTestType from metadata.great_expectations.builders.table.base_table_test_builders import ( BaseTableTestBuilder, ) class TableRowCountToEqualBuilder(BaseTableTestBuilder): \"\"\"Builder", "TestCase builder \"\"\" from metadata.generated.schema.api.tests.createTableTest import CreateTableTestRequest from metadata.generated.schema.tests.table import", "Licensed under the Apache License, Version 2.0 (the \"License\"); #", "not use this file except in compliance with the License.", "Copyright 2022 Collate # Licensed under the Apache License, Version", "writing, software # distributed under the License is distributed on", "in writing, software # distributed under the License is distributed", "you may not use this file except in compliance with", "of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by", "from metadata.great_expectations.builders.table.base_table_test_builders import ( BaseTableTestBuilder, ) class TableRowCountToEqualBuilder(BaseTableTestBuilder): \"\"\"Builder for", "class TableRowCountToEqualBuilder(BaseTableTestBuilder): \"\"\"Builder for `expect_table_row_count_to_equal` GE expectation\"\"\" def _build_test(self) ->", "\"\"\" TestCase builder \"\"\" from metadata.generated.schema.api.tests.createTableTest import CreateTableTestRequest from metadata.generated.schema.tests.table", "# Licensed under the Apache License, Version 2.0 (the \"License\");", "from metadata.generated.schema.tests.table import tableRowCountToEqual from metadata.generated.schema.tests.tableTest import TableTestType from metadata.great_expectations.builders.table.base_table_test_builders", "governing permissions and # limitations under the License. 
\"\"\" TestCase", "metadata.great_expectations.builders.table.base_table_test_builders import ( BaseTableTestBuilder, ) class TableRowCountToEqualBuilder(BaseTableTestBuilder): \"\"\"Builder for `expect_table_row_count_to_equal`", "use this file except in compliance with the License. #", "builder for the test\"\"\" return self.build_test_request( config=tableRowCountToEqual.TableRowCountToEqual( value=self.result[\"expectation_config\"][\"kwargs\"][\"value\"], ), test_type=TableTestType.tableRowCountToEqual,", "import ( BaseTableTestBuilder, ) class TableRowCountToEqualBuilder(BaseTableTestBuilder): \"\"\"Builder for `expect_table_row_count_to_equal` GE", "a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless", "CONDITIONS OF ANY KIND, either express or implied. # See", "metadata.generated.schema.tests.tableTest import TableTestType from metadata.great_expectations.builders.table.base_table_test_builders import ( BaseTableTestBuilder, ) class", "or implied. # See the License for the specific language", "License is distributed on an \"AS IS\" BASIS, # WITHOUT", "# limitations under the License. \"\"\" TestCase builder \"\"\" from", "limitations under the License. \"\"\" TestCase builder \"\"\" from metadata.generated.schema.api.tests.createTableTest", "License. # You may obtain a copy of the License", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "License, Version 2.0 (the \"License\"); # you may not use", "# You may obtain a copy of the License at", "KIND, either express or implied. # See the License for", "specific language governing permissions and # limitations under the License.", "from metadata.generated.schema.api.tests.createTableTest import CreateTableTestRequest from metadata.generated.schema.tests.table import tableRowCountToEqual from metadata.generated.schema.tests.tableTest", "http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to", "under the License is distributed on an \"AS IS\" BASIS,", "License for the specific language governing permissions and # limitations", "\"\"\"Builder for `expect_table_row_count_to_equal` GE expectation\"\"\" def _build_test(self) -> CreateTableTestRequest: \"\"\"Specific", "Collate # Licensed under the Apache License, Version 2.0 (the", "\"\"\"Specific test builder for the test\"\"\" return self.build_test_request( config=tableRowCountToEqual.TableRowCountToEqual( value=self.result[\"expectation_config\"][\"kwargs\"][\"value\"],", "TableRowCountToEqualBuilder(BaseTableTestBuilder): \"\"\"Builder for `expect_table_row_count_to_equal` GE expectation\"\"\" def _build_test(self) -> CreateTableTestRequest:", "the License for the specific language governing permissions and #", "copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required", "(the \"License\"); # you may not use this file except", "at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or", "Apache License, Version 2.0 (the \"License\"); # you may not", "-> CreateTableTestRequest: \"\"\"Specific test builder for the test\"\"\" return self.build_test_request(", "# you may not use this file except in compliance", "metadata.generated.schema.tests.table import tableRowCountToEqual from metadata.generated.schema.tests.tableTest import TableTestType from metadata.great_expectations.builders.table.base_table_test_builders import", "either express or implied. # See the License for the", "OR CONDITIONS OF ANY KIND, either express or implied. 
#", "tableRowCountToEqual from metadata.generated.schema.tests.tableTest import TableTestType from metadata.great_expectations.builders.table.base_table_test_builders import ( BaseTableTestBuilder,", "test builder for the test\"\"\" return self.build_test_request( config=tableRowCountToEqual.TableRowCountToEqual( value=self.result[\"expectation_config\"][\"kwargs\"][\"value\"], ),", "_build_test(self) -> CreateTableTestRequest: \"\"\"Specific test builder for the test\"\"\" return", "the License is distributed on an \"AS IS\" BASIS, #", "the License. \"\"\" TestCase builder \"\"\" from metadata.generated.schema.api.tests.createTableTest import CreateTableTestRequest", "in compliance with the License. # You may obtain a", "obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 #", "software # distributed under the License is distributed on an", "def _build_test(self) -> CreateTableTestRequest: \"\"\"Specific test builder for the test\"\"\"", "License. \"\"\" TestCase builder \"\"\" from metadata.generated.schema.api.tests.createTableTest import CreateTableTestRequest from", "import CreateTableTestRequest from metadata.generated.schema.tests.table import tableRowCountToEqual from metadata.generated.schema.tests.tableTest import TableTestType", "language governing permissions and # limitations under the License. \"\"\"", "import tableRowCountToEqual from metadata.generated.schema.tests.tableTest import TableTestType from metadata.great_expectations.builders.table.base_table_test_builders import (", "Version 2.0 (the \"License\"); # you may not use this", "may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0", "law or agreed to in writing, software # distributed under", "CreateTableTestRequest from metadata.generated.schema.tests.table import tableRowCountToEqual from metadata.generated.schema.tests.tableTest import TableTestType from", "expectation\"\"\" def _build_test(self) -> CreateTableTestRequest: \"\"\"Specific test builder for the", "# Copyright 2022 Collate # Licensed under the Apache License,", "implied. # See the License for the specific language governing", "`expect_table_row_count_to_equal` GE expectation\"\"\" def _build_test(self) -> CreateTableTestRequest: \"\"\"Specific test builder", "under the Apache License, Version 2.0 (the \"License\"); # you", "import TableTestType from metadata.great_expectations.builders.table.base_table_test_builders import ( BaseTableTestBuilder, ) class TableRowCountToEqualBuilder(BaseTableTestBuilder):", "\"License\"); # you may not use this file except in", "License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law", "for the test\"\"\" return self.build_test_request( config=tableRowCountToEqual.TableRowCountToEqual( value=self.result[\"expectation_config\"][\"kwargs\"][\"value\"], ), test_type=TableTestType.tableRowCountToEqual, )", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "by applicable law or agreed to in writing, software #", "# distributed under the License is distributed on an \"AS", "OF ANY KIND, either express or implied. # See the", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "permissions and # limitations under the License. \"\"\" TestCase builder", "# Unless required by applicable law or agreed to in", "ANY KIND, either express or implied. # See the License", "See the License for the specific language governing permissions and", "the License. 
# You may obtain a copy of the", "for the specific language governing permissions and # limitations under", "and # limitations under the License. \"\"\" TestCase builder \"\"\"", "metadata.generated.schema.api.tests.createTableTest import CreateTableTestRequest from metadata.generated.schema.tests.table import tableRowCountToEqual from metadata.generated.schema.tests.tableTest import", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "to in writing, software # distributed under the License is", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "# See the License for the specific language governing permissions", "the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable", "You may obtain a copy of the License at #", "may not use this file except in compliance with the", "or agreed to in writing, software # distributed under the", "from metadata.generated.schema.tests.tableTest import TableTestType from metadata.great_expectations.builders.table.base_table_test_builders import ( BaseTableTestBuilder, )", "required by applicable law or agreed to in writing, software", "# http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "( BaseTableTestBuilder, ) class TableRowCountToEqualBuilder(BaseTableTestBuilder): \"\"\"Builder for `expect_table_row_count_to_equal` GE expectation\"\"\"", "with the License. # You may obtain a copy of", "this file except in compliance with the License. # You", "GE expectation\"\"\" def _build_test(self) -> CreateTableTestRequest: \"\"\"Specific test builder for", "the Apache License, Version 2.0 (the \"License\"); # you may", "BaseTableTestBuilder, ) class TableRowCountToEqualBuilder(BaseTableTestBuilder): \"\"\"Builder for `expect_table_row_count_to_equal` GE expectation\"\"\" def" ]
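A sketch of the Great Expectations validation-result shape this builder consumes; the payload below is illustrative, and the only key path confirmed by the source is result["expectation_config"]["kwargs"]["value"].

# Hypothetical GE result for one expectation, as the builder would see it in self.result.
result = {
    "expectation_config": {
        "expectation_type": "expect_table_row_count_to_equal",
        "kwargs": {"value": 120},
    },
}
# The builder pulls the expected row count out of the expectation kwargs
# and wraps it in a tableRowCountToEqual.TableRowCountToEqual config.
value = result["expectation_config"]["kwargs"]["value"]
assert value == 120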
[ "min(y1 + h1, y2 + h2) - max(y1, y2) if", "annot[1] * self.HEIGHT return [x,y,w,h] def convertYoloAnnotToCoord(self, yolo_annot): ''' yolo_annot", "i_subplot in range(1, 11): plt.subplot(1, 10, i_subplot) i = np.random.randint(len(pred_bboxes))", "for i_img in range(num_imgs): for i_object in range(num_objects): w, h", "w, h] print(\"Shapes: imgs \", self.imgs.shape, \" bboxes \", self.bboxes.shape)", "- max(y1, y2) if w_I <= 0 or h_I <=", "* self.WIDTH h = annot[3] * self.HEIGHT x = annot[0]", "+ GT') plt.imshow(self.imgs[i], cmap='Greys', interpolation='none', origin='lower', extent=[0, self.WIDTH, 0, self.HEIGHT])", "i_object in range(num_objects): w, h = np.random.randint(min_object_size, max_object_size, size=2) x", "2) plt.gca().set_title('Global image holder: used for plotting.') plt.imshow(test_imgs_sample, cmap='Greys', interpolation='none',", "or h_I <= 0: # no overlap return 0. I", "= 4 num_objects = 1 self.bboxes = np.zeros((num_imgs, num_objects, 4))", "fig.subplots_adjust(top=0.85) fig.suptitle('Generated image sample + GT') plt.imshow(self.imgs[i], cmap='Greys', interpolation='none', origin='lower',", "bbox2[1], bbox2[2], bbox2[3] w_I = min(x1 + w1, x2 +", "y, w, h] ''' w = yolo_annot[2] * self.WIDTH h", "in range(1, 11): plt.subplot(1, 10, i_subplot) i = np.random.randint(len(pred_bboxes)) plt.imshow(self.test_imgs[i],", "imgs \", self.imgs.shape, \" bboxes \", self.bboxes.shape) #why this? #", "and test. i = int(0.8 * num_imgs) train_X = X[:i]", "plt.show() def plot_rectangle(self, img, bbox): fig = plt.figure() fig.suptitle('Plotting rectangle.')", "self.WIDTH = 8 self.HEIGHT = 8 num_imgs = 50000 min_object_size", "y[:i] test_y = y[i:] self.test_imgs = self.imgs[i:] self.test_bboxes = self.bboxes[i:]", "of intersection over the area of unity''' x1, y1, w1,", "1 max_object_size = 4 num_objects = 1 self.bboxes = np.zeros((num_imgs,", "plt.subplot(1, 2, 2) plt.gca().set_title('Global image holder: used for plotting.') plt.imshow(test_imgs_sample,", "1) plt.imshow(img, cmap='Greys', interpolation='none', origin='lower', extent=[0, self.WIDTH, 0, self.HEIGHT]) plt.gca().add_patch(matplotlib.patches.Rectangle((bbox[0],", "for i_subplot in range(1, 11): plt.subplot(1, 10, i_subplot) i =", "y:y+h, x:x+w] = 1. # set rectangle to 1 self.bboxes[i_img,", "the area of unity''' x1, y1, w1, h1 = bbox1[0],", "for plotting.') plt.imshow(test_imgs_sample, cmap='Greys', interpolation='none', origin='lower', extent=[0, self.WIDTH, 0, self.HEIGHT])", "slice image') fig.subplots_adjust(top=0.85) plt.subplot(1, 2, 1) plt.gca().set_title('Returned by the dataset", "= int(0.8 * num_imgs) train_X = X[:i] #80% for training", "plt.imshow(test_imgs_sample, cmap='Greys', interpolation='none', origin='lower', extent=[0, self.WIDTH, 0, self.HEIGHT]) plt.show() print('compare:',TMP,test_imgs_sample)", "exp_bbox[2], exp_bbox[3], ec='b', fc='none')) plt.annotate('IOU: {:.2f}'.format(self.IOU(pred_bbox, exp_bbox)), (pred_bbox[0], pred_bbox[1]+pred_bbox[3]+0.2), color='r')", "bbox1[1], bbox1[2], bbox1[3] x2, y2, w2, h2 = bbox2[0], bbox2[1],", "plt.imshow(self.test_imgs[i], cmap='Greys', interpolation='none', origin='lower', extent=[0, self.WIDTH, 0, self.HEIGHT]) for pred_bbox,", "y2) if w_I <= 0 or h_I <= 0: #", "self.HEIGHT - h) self.imgs[i_img, y:y+h, x:x+w] = 1. 
# set", "11): plt.subplot(1, 10, i_subplot) i = np.random.randint(len(pred_bboxes)) plt.imshow(self.test_imgs[i], cmap='Greys', interpolation='none',", "pred_bbox[1]+pred_bbox[3]+0.2), color='r') if not legend_plotted: legend_plotted = True plt.gca().legend(['Pred','GT'],loc='upper center',", "self.bboxes[i_img, i_object] = [x, y, w, h] print(\"Shapes: imgs \",", "self.HEIGHT x = (yolo_annot[0] * self.WIDTH) - (w/2) y =", "interpolation='none', origin='lower', extent=[0, self.WIDTH, 0, self.HEIGHT]) for pred_bbox, exp_bbox in", "print('compare:',test_X_sample,test_imgs_sample) def show_predicted(self, pred_bboxes): # Show a few images and", "bbox2): '''Calculate overlap between two bounding boxes [x, y, w,", "i_object] = [x, y, w, h] print(\"Shapes: imgs \", self.imgs.shape,", "w2 * h2 - I return I / U def", "plt.gca().add_patch(matplotlib.patches.Rectangle((bbox[0], bbox[1]), bbox[2], bbox[3], ec='r', fc='none')) plt.show() def check_dataset_image_compability(self, test_X_sample,", "extent=[0, self.WIDTH, 0, self.HEIGHT]) plt.show() print('compare:',TMP,test_imgs_sample) def IOU(self,bbox1, bbox2): '''Calculate", "x:x+w] = 1. # set rectangle to 1 self.bboxes[i_img, i_object]", "generated imgs match to the test_X slice image') fig.subplots_adjust(top=0.85) plt.subplot(1,", "plt.gca().set_title('Global image holder: used for plotting.') plt.imshow(test_imgs_sample, cmap='Greys', interpolation='none', origin='lower',", "'gt',exp_bbox) plt.gca().add_patch(matplotlib.patches.Rectangle((pred_bbox[0], pred_bbox[1]), pred_bbox[2], pred_bbox[3], ec='r', fc='none')) #gt plt.gca().add_patch(matplotlib.patches.Rectangle((exp_bbox[0], exp_bbox[1]),", "- (h/2) return [x,y,w,h] def show_generated(self, i=0): fig = plt.figure()", "y2 + h2) - max(y1, y2) if w_I <= 0", "w = annot[2] * self.WIDTH h = annot[3] * self.HEIGHT", "X = (self.imgs.reshape(num_imgs, -1) - np.mean(self.imgs)) / np.std(self.imgs) X =", "plt.imshow(test_imgs_sample, cmap='Greys', interpolation='none', origin='lower', extent=[0, self.WIDTH, 0, self.HEIGHT]) plt.show() print('compare:',test_X_sample,test_imgs_sample)", "X[i:] train_y = y[:i] test_y = y[i:] self.test_imgs = self.imgs[i:]", "range(1, 11): plt.subplot(1, 10, i_subplot) i = np.random.randint(len(pred_bboxes)) plt.imshow(self.test_imgs[i], cmap='Greys',", "pred_bboxes): # Show a few images and predicted bounding boxes", "pred_bbox, exp_bbox in zip(pred_bboxes[i], self.test_bboxes[i]): # print('before convertion: pred',pred_bbox, 'gt',exp_bbox)", "import tensorflow as tf import datetime class JriekeBboxDataset: def generate(self):", "i_img in range(num_imgs): for i_object in range(num_objects): w, h =", "to 1 self.bboxes[i_img, i_object] = [x, y, w, h] print(\"Shapes:", "i=0): fig = plt.figure() fig.subplots_adjust(top=0.85) fig.suptitle('Generated image sample + GT')", "range(num_imgs): for i_object in range(num_objects): w, h = np.random.randint(min_object_size, max_object_size,", "np.random.randint(len(pred_bboxes)) plt.imshow(self.test_imgs[i], cmap='Greys', interpolation='none', origin='lower', extent=[0, self.WIDTH, 0, self.HEIGHT]) for", "= yolo_annot[3] * self.HEIGHT x = (yolo_annot[0] * self.WIDTH) -", "for bbox in self.bboxes[i]: plt.gca().add_patch(matplotlib.patches.Rectangle((bbox[0], bbox[1]), bbox[2], bbox[3], ec='r', fc='none'))", "fig = plt.figure(figsize=(12, 3)) fig.suptitle('check if the generated imgs match", "print('Generating...') self.WIDTH = 8 self.HEIGHT = 8 num_imgs = 50000", "np.random.randint(min_object_size, max_object_size, size=2) x = 
np.random.randint(0, self.WIDTH - w) y", "bbox[3], ec='r', fc='none')) plt.show() def check_dataset_image_compability(self, test_X_sample, test_imgs_sample): fig =", "yolo_annot -> [x, y, w, h] ''' w = yolo_annot[2]", "= min(x1 + w1, x2 + w2) - max(x1, x2)", "self.HEIGHT]) plt.gca().add_patch(matplotlib.patches.Rectangle((bbox[0], bbox[1]), bbox[2], bbox[3], ec='r', fc='none')) plt.show() def check_dataset_image_compability(self,", "self.WIDTH, 0, self.HEIGHT]) plt.gca().add_patch(matplotlib.patches.Rectangle((bbox[0], bbox[1]), bbox[2], bbox[3], ec='r', fc='none')) plt.show()", "= 1 max_object_size = 4 num_objects = 1 self.bboxes =", "(h/2) return [x,y,w,h] def show_generated(self, i=0): fig = plt.figure() fig.subplots_adjust(top=0.85)", "y2, w2, h2 = bbox2[0], bbox2[1], bbox2[2], bbox2[3] w_I =", "test_X, test_y def check_dataset_image_compability(self, test_X_sample, test_imgs_sample): fig = plt.figure(figsize=(12, 3))", "ec='r', fc='none')) plt.show() def check_dataset_image_compability(self, test_X_sample, test_imgs_sample): fig = plt.figure(figsize=(12,", "self.imgs[i_img, y:y+h, x:x+w] = 1. # set rectangle to 1", "= plt.figure(figsize=(12, 3)) fig.subplots_adjust(top=0.85) fig.suptitle('Prediction demonstration. Random samples.') legend_plotted =", "import matplotlib import numpy as np import tensorflow as tf", "self.imgs.shape, \" bboxes \", self.bboxes.shape) #why this? # X =", "size=2) x = np.random.randint(0, self.WIDTH - w) y = np.random.randint(0,", "yolo_annot[3] * self.HEIGHT x = (yolo_annot[0] * self.WIDTH) - (w/2)", "def plot_rectangle(self, img, bbox): fig = plt.figure() fig.suptitle('Plotting rectangle.') fig.subplots_adjust(top=0.85)", "cmap='Greys', interpolation='none', origin='lower', extent=[0, self.WIDTH, 0, self.HEIGHT]) plt.subplot(1, 2, 2)", "set rectangle to 1 self.bboxes[i_img, i_object] = [x, y, w,", "= self.bboxes.reshape(num_imgs, -1) / self.WIDTH # Split training and test.", "= np.random.randint(min_object_size, max_object_size, size=2) x = np.random.randint(0, self.WIDTH - w)", "= False for i_subplot in range(1, 11): plt.subplot(1, 10, i_subplot)", "between two bounding boxes [x, y, w, h] as the", "- max(x1, x2) h_I = min(y1 + h1, y2 +", "self.WIDTH, 0, self.HEIGHT]) for bbox in self.bboxes[i]: plt.gca().add_patch(matplotlib.patches.Rectangle((bbox[0], bbox[1]), bbox[2],", "test_X slice image') fig.subplots_adjust(top=0.85) plt.subplot(1, 2, 1) plt.gca().set_title('Returned by the", "w2, h2 = bbox2[0], bbox2[1], bbox2[2], bbox2[3] w_I = min(x1", "8 num_imgs = 50000 min_object_size = 1 max_object_size = 4", "h] print(\"Shapes: imgs \", self.imgs.shape, \" bboxes \", self.bboxes.shape) #why", "- I return I / U def convertDefaultAnnotToCoord(self, annot): '''", "self.WIDTH) - (w/2) y = (yolo_annot[1] * self.HEIGHT) - (h/2)", "plt.imshow(test_X_sample, cmap='Greys', interpolation='none', origin='lower', extent=[0, self.WIDTH, 0, self.HEIGHT]) plt.subplot(1, 2,", "np.random.randint(0, self.WIDTH - w) y = np.random.randint(0, self.HEIGHT - h)", "tensorflow as tf import datetime class JriekeBboxDataset: def generate(self): print('Generating...')", "-> [x, y, w, h] ''' w = annot[2] *", "#gt plt.gca().add_patch(matplotlib.patches.Rectangle((exp_bbox[0], exp_bbox[1]), exp_bbox[2], exp_bbox[3], ec='b', fc='none')) plt.annotate('IOU: {:.2f}'.format(self.IOU(pred_bbox, exp_bbox)),", "self.WIDTH, self.HEIGHT)) # set background to 0 for i_img in", "+ w2) - max(x1, x2) h_I = min(y1 + h1,", "Split training and test. 
i = int(0.8 * num_imgs) train_X", "= self.convertDefaultAnnotToCoord(pred_bbox) # exp_bbox = self.convertDefaultAnnotToCoord(exp_bbox) print('after convertion: pred',pred_bbox, 'gt',exp_bbox)", "y1, w1, h1 = bbox1[0], bbox1[1], bbox1[2], bbox1[3] x2, y2,", "w = yolo_annot[2] * self.WIDTH h = yolo_annot[3] * self.HEIGHT", "the generated imgs match to the test_X slice image') fig.subplots_adjust(top=0.85)", "= True plt.gca().legend(['Pred','GT'],loc='upper center', bbox_to_anchor=(0.5, -0.5), fancybox=True) plt.show() # plt.savefig('plots/bw-single-rectangle_prediction_{0:%Y-%m-%d%H:%M:%S}.png'.format(datetime.datetime.now()),", "legend_plotted: legend_plotted = True plt.gca().legend(['Pred','GT'],loc='upper center', bbox_to_anchor=(0.5, -0.5), fancybox=True) plt.show()", "bbox[3], ec='r', fc='none')) plt.gca().legend(['GT']) plt.show() def plot_rectangle(self, img, bbox): fig", "the test_X slice image') fig.subplots_adjust(top=0.85) plt.subplot(1, 2, 1) plt.gca().set_title('Returned by", "bbox in self.bboxes[i]: plt.gca().add_patch(matplotlib.patches.Rectangle((bbox[0], bbox[1]), bbox[2], bbox[3], ec='r', fc='none')) plt.gca().legend(['GT'])", "interpolation='none', origin='lower', extent=[0, self.WIDTH, 0, self.HEIGHT]) plt.show() print('compare:',TMP,test_imgs_sample) def IOU(self,bbox1,", "w, h] ''' w = annot[2] * self.WIDTH h =", "plotting.') plt.imshow(test_imgs_sample, cmap='Greys', interpolation='none', origin='lower', extent=[0, self.WIDTH, 0, self.HEIGHT]) plt.show()", "based on https://github.com/jrieke/shape-detection/ ''' import matplotlib.pyplot as plt import matplotlib", "cmap='Greys', interpolation='none', origin='lower', extent=[0, self.WIDTH, 0, self.HEIGHT]) plt.show() print('compare:',test_X_sample,test_imgs_sample) def", "bbox[2], bbox[3], ec='r', fc='none')) plt.show() def check_dataset_image_compability(self, test_X_sample, test_imgs_sample): fig", "''' This code is based on https://github.com/jrieke/shape-detection/ ''' import matplotlib.pyplot", "legend_plotted = True plt.gca().legend(['Pred','GT'],loc='upper center', bbox_to_anchor=(0.5, -0.5), fancybox=True) plt.show() #", "JriekeBboxDataset: def generate(self): print('Generating...') self.WIDTH = 8 self.HEIGHT = 8", "min_object_size = 1 max_object_size = 4 num_objects = 1 self.bboxes", "over the area of unity''' x1, y1, w1, h1 =", "= self.convertDefaultAnnotToCoord(exp_bbox) print('after convertion: pred',pred_bbox, 'gt',exp_bbox) plt.gca().add_patch(matplotlib.patches.Rectangle((pred_bbox[0], pred_bbox[1]), pred_bbox[2], pred_bbox[3],", "pred_bbox[1]), pred_bbox[2], pred_bbox[3], ec='r', fc='none')) #gt plt.gca().add_patch(matplotlib.patches.Rectangle((exp_bbox[0], exp_bbox[1]), exp_bbox[2], exp_bbox[3],", "extent=[0, self.WIDTH, 0, self.HEIGHT]) plt.show() print('compare:',test_X_sample,test_imgs_sample) def show_predicted(self, pred_bboxes): #", "fc='none')) plt.annotate('IOU: {:.2f}'.format(self.IOU(pred_bbox, exp_bbox)), (pred_bbox[0], pred_bbox[1]+pred_bbox[3]+0.2), color='r') if not legend_plotted:", "import numpy as np import tensorflow as tf import datetime", "[x, y, w, h] print(\"Shapes: imgs \", self.imgs.shape, \" bboxes", "plt.figure(figsize=(12, 3)) fig.subplots_adjust(top=0.85) fig.suptitle('Prediction demonstration. 
Random samples.') legend_plotted = False", "Random samples.') legend_plotted = False for i_subplot in range(1, 11):", "as plt import matplotlib import numpy as np import tensorflow", "h = yolo_annot[3] * self.HEIGHT x = (yolo_annot[0] * self.WIDTH)", "np import tensorflow as tf import datetime class JriekeBboxDataset: def", "0: # no overlap return 0. I = w_I *", "if w_I <= 0 or h_I <= 0: # no", "h2 = bbox2[0], bbox2[1], bbox2[2], bbox2[3] w_I = min(x1 +", "= self.imgs[i:] self.test_bboxes = self.bboxes[i:] return train_X, train_y, test_X, test_y", "self.HEIGHT]) for bbox in self.bboxes[i]: plt.gca().add_patch(matplotlib.patches.Rectangle((bbox[0], bbox[1]), bbox[2], bbox[3], ec='r',", "fig = plt.figure(figsize=(12, 3)) fig.subplots_adjust(top=0.85) fig.suptitle('Prediction demonstration. Random samples.') legend_plotted", "plt.figure(figsize=(12, 3)) fig.suptitle('check if the generated imgs match to the", "* self.WIDTH) - (w/2) y = (yolo_annot[1] * self.HEIGHT) -", "train_X = X[:i] #80% for training test_X = X[i:] train_y", "plt.imshow(self.imgs[i], cmap='Greys', interpolation='none', origin='lower', extent=[0, self.WIDTH, 0, self.HEIGHT]) for bbox", "- w) y = np.random.randint(0, self.HEIGHT - h) self.imgs[i_img, y:y+h,", "x = np.random.randint(0, self.WIDTH - w) y = np.random.randint(0, self.HEIGHT", "self.bboxes.shape) #why this? # X = (self.imgs.reshape(num_imgs, -1) - np.mean(self.imgs))", "1, 1) plt.imshow(img, cmap='Greys', interpolation='none', origin='lower', extent=[0, self.WIDTH, 0, self.HEIGHT])", "\", self.bboxes.shape) #why this? # X = (self.imgs.reshape(num_imgs, -1) -", "self.WIDTH, 0, self.HEIGHT]) plt.show() print('compare:',test_X_sample,test_imgs_sample) def show_predicted(self, pred_bboxes): # Show", "self.imgs y = self.bboxes.reshape(num_imgs, -1) / self.WIDTH # Split training", "pred_bbox[2], pred_bbox[3], ec='r', fc='none')) #gt plt.gca().add_patch(matplotlib.patches.Rectangle((exp_bbox[0], exp_bbox[1]), exp_bbox[2], exp_bbox[3], ec='b',", "sample + GT') plt.imshow(self.imgs[i], cmap='Greys', interpolation='none', origin='lower', extent=[0, self.WIDTH, 0,", "bounding boxes from the test dataset. fig = plt.figure(figsize=(12, 3))", "y = self.bboxes.reshape(num_imgs, -1) / self.WIDTH # Split training and", "max(x1, x2) h_I = min(y1 + h1, y2 + h2)", "test_X = X[i:] train_y = y[:i] test_y = y[i:] self.test_imgs", "ec='b', fc='none')) plt.annotate('IOU: {:.2f}'.format(self.IOU(pred_bbox, exp_bbox)), (pred_bbox[0], pred_bbox[1]+pred_bbox[3]+0.2), color='r') if not", "plt.show() def check_dataset_image_compability(self, test_X_sample, test_imgs_sample): fig = plt.figure(figsize=(12, 3)) fig.suptitle('check", "x1, y1, w1, h1 = bbox1[0], bbox1[1], bbox1[2], bbox1[3] x2,", "matplotlib.pyplot as plt import matplotlib import numpy as np import", "bboxes \", self.bboxes.shape) #why this? 
# X = (self.imgs.reshape(num_imgs, -1)", "bbox1[0], bbox1[1], bbox1[2], bbox1[3] x2, y2, w2, h2 = bbox2[0],", "+ h2) - max(y1, y2) if w_I <= 0 or", "rectangle.') fig.subplots_adjust(top=0.85) plt.subplot(1, 1, 1) plt.imshow(img, cmap='Greys', interpolation='none', origin='lower', extent=[0,", "= np.random.randint(len(pred_bboxes)) plt.imshow(self.test_imgs[i], cmap='Greys', interpolation='none', origin='lower', extent=[0, self.WIDTH, 0, self.HEIGHT])", "return I / U def convertDefaultAnnotToCoord(self, annot): ''' annot ->", "interpolation='none', origin='lower', extent=[0, self.WIDTH, 0, self.HEIGHT]) plt.gca().add_patch(matplotlib.patches.Rectangle((bbox[0], bbox[1]), bbox[2], bbox[3],", "convertion: pred',pred_bbox, 'gt',exp_bbox) pred_bbox = self.convertDefaultAnnotToCoord(pred_bbox) # exp_bbox = self.convertDefaultAnnotToCoord(exp_bbox)", "dataset. fig = plt.figure(figsize=(12, 3)) fig.subplots_adjust(top=0.85) fig.suptitle('Prediction demonstration. Random samples.')", "to the test_X slice image') fig.subplots_adjust(top=0.85) plt.subplot(1, 2, 1) plt.gca().set_title('Returned", "-1) - np.mean(self.imgs)) / np.std(self.imgs) X = self.imgs y =", "bbox2[0], bbox2[1], bbox2[2], bbox2[3] w_I = min(x1 + w1, x2", "exp_bbox in zip(pred_bboxes[i], self.test_bboxes[i]): # print('before convertion: pred',pred_bbox, 'gt',exp_bbox) pred_bbox", "fc='none')) plt.gca().legend(['GT']) plt.show() def plot_rectangle(self, img, bbox): fig = plt.figure()", "= 50000 min_object_size = 1 max_object_size = 4 num_objects =", "training test_X = X[i:] train_y = y[:i] test_y = y[i:]", "(yolo_annot[1] * self.HEIGHT) - (h/2) return [x,y,w,h] def show_generated(self, i=0):", "* h1 + w2 * h2 - I return I", "interpolation='none', origin='lower', extent=[0, self.WIDTH, 0, self.HEIGHT]) for bbox in self.bboxes[i]:", "plt.gca().add_patch(matplotlib.patches.Rectangle((bbox[0], bbox[1]), bbox[2], bbox[3], ec='r', fc='none')) plt.gca().legend(['GT']) plt.show() def plot_rectangle(self,", "plt.figure() fig.suptitle('Plotting rectangle.') fig.subplots_adjust(top=0.85) plt.subplot(1, 1, 1) plt.imshow(img, cmap='Greys', interpolation='none',", "''' annot -> [x, y, w, h] ''' w =", "* h2 - I return I / U def convertDefaultAnnotToCoord(self,", "in range(num_imgs): for i_object in range(num_objects): w, h = np.random.randint(min_object_size,", "self.bboxes[i]: plt.gca().add_patch(matplotlib.patches.Rectangle((bbox[0], bbox[1]), bbox[2], bbox[3], ec='r', fc='none')) plt.gca().legend(['GT']) plt.show() def", "show_predicted(self, pred_bboxes): # Show a few images and predicted bounding", "= annot[0] * self.HEIGHT y = annot[1] * self.HEIGHT return", "/ np.std(self.imgs) X = self.imgs y = self.bboxes.reshape(num_imgs, -1) /", "- (w/2) y = (yolo_annot[1] * self.HEIGHT) - (h/2) return", "origin='lower', extent=[0, self.WIDTH, 0, self.HEIGHT]) plt.show() print('compare:',test_X_sample,test_imgs_sample) def show_predicted(self, pred_bboxes):", "(self.imgs.reshape(num_imgs, -1) - np.mean(self.imgs)) / np.std(self.imgs) X = self.imgs y", "by the dataset class: used for training') plt.imshow(test_X_sample, cmap='Greys', interpolation='none',", "<= 0 or h_I <= 0: # no overlap return", "''' w = yolo_annot[2] * self.WIDTH h = yolo_annot[3] *", "bbox[2], bbox[3], ec='r', fc='none')) plt.gca().legend(['GT']) plt.show() def plot_rectangle(self, img, bbox):", "def show_predicted(self, pred_bboxes): # Show a few images and predicted", "h = np.random.randint(min_object_size, max_object_size, size=2) x = np.random.randint(0, self.WIDTH -", 
"np.zeros((num_imgs, num_objects, 4)) self.imgs = np.zeros((num_imgs, self.WIDTH, self.HEIGHT)) # set", "False for i_subplot in range(1, 11): plt.subplot(1, 10, i_subplot) i", "if not legend_plotted: legend_plotted = True plt.gca().legend(['Pred','GT'],loc='upper center', bbox_to_anchor=(0.5, -0.5),", "for pred_bbox, exp_bbox in zip(pred_bboxes[i], self.test_bboxes[i]): # print('before convertion: pred',pred_bbox,", "few images and predicted bounding boxes from the test dataset.", "self.WIDTH h = yolo_annot[3] * self.HEIGHT x = (yolo_annot[0] *", "w_I <= 0 or h_I <= 0: # no overlap", "plt.gca().add_patch(matplotlib.patches.Rectangle((pred_bbox[0], pred_bbox[1]), pred_bbox[2], pred_bbox[3], ec='r', fc='none')) #gt plt.gca().add_patch(matplotlib.patches.Rectangle((exp_bbox[0], exp_bbox[1]), exp_bbox[2],", "the area of intersection over the area of unity''' x1,", "fc='none')) plt.show() def check_dataset_image_compability(self, test_X_sample, test_imgs_sample): fig = plt.figure(figsize=(12, 3))", "\", self.imgs.shape, \" bboxes \", self.bboxes.shape) #why this? # X", "w1, h1 = bbox1[0], bbox1[1], bbox1[2], bbox1[3] x2, y2, w2,", "annot[2] * self.WIDTH h = annot[3] * self.HEIGHT x =", "self.HEIGHT = 8 num_imgs = 50000 min_object_size = 1 max_object_size", "w1, x2 + w2) - max(x1, x2) h_I = min(y1", "x2 + w2) - max(x1, x2) h_I = min(y1 +", "#80% for training test_X = X[i:] train_y = y[:i] test_y", "bounding boxes [x, y, w, h] as the area of", "w_I = min(x1 + w1, x2 + w2) - max(x1,", "# print('before convertion: pred',pred_bbox, 'gt',exp_bbox) pred_bbox = self.convertDefaultAnnotToCoord(pred_bbox) # exp_bbox", "-> [x, y, w, h] ''' w = yolo_annot[2] *", "fig.subplots_adjust(top=0.85) plt.subplot(1, 2, 1) plt.gca().set_title('Returned by the dataset class: used", "= y[i:] self.test_imgs = self.imgs[i:] self.test_bboxes = self.bboxes[i:] return train_X,", "self.bboxes[i:] return train_X, train_y, test_X, test_y def check_dataset_image_compability(self, test_X_sample, test_imgs_sample):", "convertion: pred',pred_bbox, 'gt',exp_bbox) plt.gca().add_patch(matplotlib.patches.Rectangle((pred_bbox[0], pred_bbox[1]), pred_bbox[2], pred_bbox[3], ec='r', fc='none')) #gt", "image holder: used for plotting.') plt.imshow(test_imgs_sample, cmap='Greys', interpolation='none', origin='lower', extent=[0,", "annot -> [x, y, w, h] ''' w = annot[2]", "self.HEIGHT]) plt.show() print('compare:',test_X_sample,test_imgs_sample) def show_predicted(self, pred_bboxes): # Show a few", "color='r') if not legend_plotted: legend_plotted = True plt.gca().legend(['Pred','GT'],loc='upper center', bbox_to_anchor=(0.5,", "self.test_imgs = self.imgs[i:] self.test_bboxes = self.bboxes[i:] return train_X, train_y, test_X,", "self.test_bboxes = self.bboxes[i:] return train_X, train_y, test_X, test_y def check_dataset_image_compability(self,", "plt import matplotlib import numpy as np import tensorflow as", "bbox): fig = plt.figure() fig.suptitle('Plotting rectangle.') fig.subplots_adjust(top=0.85) plt.subplot(1, 1, 1)", "ec='r', fc='none')) #gt plt.gca().add_patch(matplotlib.patches.Rectangle((exp_bbox[0], exp_bbox[1]), exp_bbox[2], exp_bbox[3], ec='b', fc='none')) plt.annotate('IOU:", "self.bboxes.reshape(num_imgs, -1) / self.WIDTH # Split training and test. 
i", "= yolo_annot[2] * self.WIDTH h = yolo_annot[3] * self.HEIGHT x", "w, h] as the area of intersection over the area", "= X[i:] train_y = y[:i] test_y = y[i:] self.test_imgs =", "Show a few images and predicted bounding boxes from the", "def generate(self): print('Generating...') self.WIDTH = 8 self.HEIGHT = 8 num_imgs", "h1 + w2 * h2 - I return I /", "fig.suptitle('Generated image sample + GT') plt.imshow(self.imgs[i], cmap='Greys', interpolation='none', origin='lower', extent=[0,", "[x,y,w,h] def show_generated(self, i=0): fig = plt.figure() fig.subplots_adjust(top=0.85) fig.suptitle('Generated image", "h_I <= 0: # no overlap return 0. I =", "True plt.gca().legend(['Pred','GT'],loc='upper center', bbox_to_anchor=(0.5, -0.5), fancybox=True) plt.show() # plt.savefig('plots/bw-single-rectangle_prediction_{0:%Y-%m-%d%H:%M:%S}.png'.format(datetime.datetime.now()), dpi=300)", "self.convertDefaultAnnotToCoord(pred_bbox) # exp_bbox = self.convertDefaultAnnotToCoord(exp_bbox) print('after convertion: pred',pred_bbox, 'gt',exp_bbox) plt.gca().add_patch(matplotlib.patches.Rectangle((pred_bbox[0],", "def IOU(self,bbox1, bbox2): '''Calculate overlap between two bounding boxes [x,", "print(\"Shapes: imgs \", self.imgs.shape, \" bboxes \", self.bboxes.shape) #why this?", "np.mean(self.imgs)) / np.std(self.imgs) X = self.imgs y = self.bboxes.reshape(num_imgs, -1)", "fig.suptitle('Prediction demonstration. Random samples.') legend_plotted = False for i_subplot in", "test_y = y[i:] self.test_imgs = self.imgs[i:] self.test_bboxes = self.bboxes[i:] return", "'''Calculate overlap between two bounding boxes [x, y, w, h]", "pred_bbox = self.convertDefaultAnnotToCoord(pred_bbox) # exp_bbox = self.convertDefaultAnnotToCoord(exp_bbox) print('after convertion: pred',pred_bbox,", "0, self.HEIGHT]) plt.gca().add_patch(matplotlib.patches.Rectangle((bbox[0], bbox[1]), bbox[2], bbox[3], ec='r', fc='none')) plt.show() def", "fig.subplots_adjust(top=0.85) plt.subplot(1, 1, 1) plt.imshow(img, cmap='Greys', interpolation='none', origin='lower', extent=[0, self.WIDTH,", "this? # X = (self.imgs.reshape(num_imgs, -1) - np.mean(self.imgs)) / np.std(self.imgs)", "# no overlap return 0. I = w_I * h_I", "check_dataset_image_compability(self, test_X_sample, test_imgs_sample): fig = plt.figure(figsize=(12, 3)) fig.suptitle('check if the", "U = w1 * h1 + w2 * h2 -", "extent=[0, self.WIDTH, 0, self.HEIGHT]) plt.gca().add_patch(matplotlib.patches.Rectangle((bbox[0], bbox[1]), bbox[2], bbox[3], ec='r', fc='none'))", "i_subplot) i = np.random.randint(len(pred_bboxes)) plt.imshow(self.test_imgs[i], cmap='Greys', interpolation='none', origin='lower', extent=[0, self.WIDTH,", "0, self.HEIGHT]) plt.show() print('compare:',test_X_sample,test_imgs_sample) def show_predicted(self, pred_bboxes): # Show a", "-1) / self.WIDTH # Split training and test. 
i =", "h1 = bbox1[0], bbox1[1], bbox1[2], bbox1[3] x2, y2, w2, h2", "print('compare:',TMP,test_imgs_sample) def IOU(self,bbox1, bbox2): '''Calculate overlap between two bounding boxes", "pred',pred_bbox, 'gt',exp_bbox) pred_bbox = self.convertDefaultAnnotToCoord(pred_bbox) # exp_bbox = self.convertDefaultAnnotToCoord(exp_bbox) print('after", "as the area of intersection over the area of unity'''", "image') fig.subplots_adjust(top=0.85) plt.subplot(1, 2, 1) plt.gca().set_title('Returned by the dataset class:", "test_X_sample, test_imgs_sample): fig = plt.figure(figsize=(12, 3)) fig.suptitle('check if the generated", "* h_I U = w1 * h1 + w2 *", "= annot[3] * self.HEIGHT x = annot[0] * self.HEIGHT y", "= np.random.randint(0, self.HEIGHT - h) self.imgs[i_img, y:y+h, x:x+w] = 1.", "used for plotting.') plt.imshow(test_imgs_sample, cmap='Greys', interpolation='none', origin='lower', extent=[0, self.WIDTH, 0,", "annot[3] * self.HEIGHT x = annot[0] * self.HEIGHT y =", "(w/2) y = (yolo_annot[1] * self.HEIGHT) - (h/2) return [x,y,w,h]", "self.HEIGHT) - (h/2) return [x,y,w,h] def show_generated(self, i=0): fig =", "+ w2 * h2 - I return I / U", "x = annot[0] * self.HEIGHT y = annot[1] * self.HEIGHT", "self.convertDefaultAnnotToCoord(exp_bbox) print('after convertion: pred',pred_bbox, 'gt',exp_bbox) plt.gca().add_patch(matplotlib.patches.Rectangle((pred_bbox[0], pred_bbox[1]), pred_bbox[2], pred_bbox[3], ec='r',", "self.WIDTH h = annot[3] * self.HEIGHT x = annot[0] *", "origin='lower', extent=[0, self.WIDTH, 0, self.HEIGHT]) for pred_bbox, exp_bbox in zip(pred_bboxes[i],", "= plt.figure() fig.suptitle('Plotting rectangle.') fig.subplots_adjust(top=0.85) plt.subplot(1, 1, 1) plt.imshow(img, cmap='Greys',", "np.random.randint(0, self.HEIGHT - h) self.imgs[i_img, y:y+h, x:x+w] = 1. #", "X = self.imgs y = self.bboxes.reshape(num_imgs, -1) / self.WIDTH #", "bbox1[2], bbox1[3] x2, y2, w2, h2 = bbox2[0], bbox2[1], bbox2[2],", "bbox2[2], bbox2[3] w_I = min(x1 + w1, x2 + w2)", "IOU(self,bbox1, bbox2): '''Calculate overlap between two bounding boxes [x, y,", "w, h] ''' w = yolo_annot[2] * self.WIDTH h =", "''' import matplotlib.pyplot as plt import matplotlib import numpy as", "import matplotlib.pyplot as plt import matplotlib import numpy as np", "matplotlib import numpy as np import tensorflow as tf import", "# X = (self.imgs.reshape(num_imgs, -1) - np.mean(self.imgs)) / np.std(self.imgs) X", "[x, y, w, h] as the area of intersection over", "- h) self.imgs[i_img, y:y+h, x:x+w] = 1. # set rectangle", "training and test. i = int(0.8 * num_imgs) train_X =", "extent=[0, self.WIDTH, 0, self.HEIGHT]) for bbox in self.bboxes[i]: plt.gca().add_patch(matplotlib.patches.Rectangle((bbox[0], bbox[1]),", "= bbox1[0], bbox1[1], bbox1[2], bbox1[3] x2, y2, w2, h2 =", "test dataset. fig = plt.figure(figsize=(12, 3)) fig.subplots_adjust(top=0.85) fig.suptitle('Prediction demonstration. Random", "training') plt.imshow(test_X_sample, cmap='Greys', interpolation='none', origin='lower', extent=[0, self.WIDTH, 0, self.HEIGHT]) plt.subplot(1,", "from the test dataset. 
fig = plt.figure(figsize=(12, 3)) fig.subplots_adjust(top=0.85) fig.suptitle('Prediction", "imgs match to the test_X slice image') fig.subplots_adjust(top=0.85) plt.subplot(1, 2,", "[x, y, w, h] ''' w = yolo_annot[2] * self.WIDTH", "annot[0] * self.HEIGHT y = annot[1] * self.HEIGHT return [x,y,w,h]", "extent=[0, self.WIDTH, 0, self.HEIGHT]) plt.subplot(1, 2, 2) plt.gca().set_title('Global image holder:", "code is based on https://github.com/jrieke/shape-detection/ ''' import matplotlib.pyplot as plt", "used for training') plt.imshow(test_X_sample, cmap='Greys', interpolation='none', origin='lower', extent=[0, self.WIDTH, 0,", "x2) h_I = min(y1 + h1, y2 + h2) -", "cmap='Greys', interpolation='none', origin='lower', extent=[0, self.WIDTH, 0, self.HEIGHT]) plt.gca().add_patch(matplotlib.patches.Rectangle((bbox[0], bbox[1]), bbox[2],", "overlap return 0. I = w_I * h_I U =", "self.WIDTH, 0, self.HEIGHT]) plt.subplot(1, 2, 2) plt.gca().set_title('Global image holder: used", "y = (yolo_annot[1] * self.HEIGHT) - (h/2) return [x,y,w,h] def", "h1, y2 + h2) - max(y1, y2) if w_I <=", "holder: used for plotting.') plt.imshow(test_imgs_sample, cmap='Greys', interpolation='none', origin='lower', extent=[0, self.WIDTH,", "cmap='Greys', interpolation='none', origin='lower', extent=[0, self.WIDTH, 0, self.HEIGHT]) for bbox in", "'gt',exp_bbox) pred_bbox = self.convertDefaultAnnotToCoord(pred_bbox) # exp_bbox = self.convertDefaultAnnotToCoord(exp_bbox) print('after convertion:", "i = np.random.randint(len(pred_bboxes)) plt.imshow(self.test_imgs[i], cmap='Greys', interpolation='none', origin='lower', extent=[0, self.WIDTH, 0,", "/ self.WIDTH # Split training and test. i = int(0.8", "X[:i] #80% for training test_X = X[i:] train_y = y[:i]", "pred_bbox[3], ec='r', fc='none')) #gt plt.gca().add_patch(matplotlib.patches.Rectangle((exp_bbox[0], exp_bbox[1]), exp_bbox[2], exp_bbox[3], ec='b', fc='none'))", "datetime class JriekeBboxDataset: def generate(self): print('Generating...') self.WIDTH = 8 self.HEIGHT", "h) self.imgs[i_img, y:y+h, x:x+w] = 1. 
# set rectangle to", "bbox2[3] w_I = min(x1 + w1, x2 + w2) -", "0, self.HEIGHT]) for pred_bbox, exp_bbox in zip(pred_bboxes[i], self.test_bboxes[i]): # print('before", "exp_bbox = self.convertDefaultAnnotToCoord(exp_bbox) print('after convertion: pred',pred_bbox, 'gt',exp_bbox) plt.gca().add_patch(matplotlib.patches.Rectangle((pred_bbox[0], pred_bbox[1]), pred_bbox[2],", "= np.random.randint(0, self.WIDTH - w) y = np.random.randint(0, self.HEIGHT -", "= np.zeros((num_imgs, self.WIDTH, self.HEIGHT)) # set background to 0 for", "plt.figure() fig.subplots_adjust(top=0.85) fig.suptitle('Generated image sample + GT') plt.imshow(self.imgs[i], cmap='Greys', interpolation='none',", "= (self.imgs.reshape(num_imgs, -1) - np.mean(self.imgs)) / np.std(self.imgs) X = self.imgs", "annot): ''' annot -> [x, y, w, h] ''' w", "def show_generated(self, i=0): fig = plt.figure() fig.subplots_adjust(top=0.85) fig.suptitle('Generated image sample", "0 for i_img in range(num_imgs): for i_object in range(num_objects): w,", "on https://github.com/jrieke/shape-detection/ ''' import matplotlib.pyplot as plt import matplotlib import", "- np.mean(self.imgs)) / np.std(self.imgs) X = self.imgs y = self.bboxes.reshape(num_imgs,", "max_object_size = 4 num_objects = 1 self.bboxes = np.zeros((num_imgs, num_objects,", "self.imgs[i:] self.test_bboxes = self.bboxes[i:] return train_X, train_y, test_X, test_y def", "def check_dataset_image_compability(self, test_X_sample, test_imgs_sample): fig = plt.figure(figsize=(12, 3)) fig.suptitle('check if", "self.HEIGHT x = annot[0] * self.HEIGHT y = annot[1] *", "yolo_annot): ''' yolo_annot -> [x, y, w, h] ''' w", "[x, y, w, h] ''' w = annot[2] * self.WIDTH", "extent=[0, self.WIDTH, 0, self.HEIGHT]) for pred_bbox, exp_bbox in zip(pred_bboxes[i], self.test_bboxes[i]):", "plt.gca().add_patch(matplotlib.patches.Rectangle((exp_bbox[0], exp_bbox[1]), exp_bbox[2], exp_bbox[3], ec='b', fc='none')) plt.annotate('IOU: {:.2f}'.format(self.IOU(pred_bbox, exp_bbox)), (pred_bbox[0],", "np.std(self.imgs) X = self.imgs y = self.bboxes.reshape(num_imgs, -1) / self.WIDTH", "w_I * h_I U = w1 * h1 + w2", "1. 
# set rectangle to 1 self.bboxes[i_img, i_object] = [x,", "return [x,y,w,h] def show_generated(self, i=0): fig = plt.figure() fig.subplots_adjust(top=0.85) fig.suptitle('Generated", "self.HEIGHT return [x,y,w,h] def convertYoloAnnotToCoord(self, yolo_annot): ''' yolo_annot -> [x,", "print('after convertion: pred',pred_bbox, 'gt',exp_bbox) plt.gca().add_patch(matplotlib.patches.Rectangle((pred_bbox[0], pred_bbox[1]), pred_bbox[2], pred_bbox[3], ec='r', fc='none'))", "= plt.figure(figsize=(12, 3)) fig.suptitle('check if the generated imgs match to", "print('before convertion: pred',pred_bbox, 'gt',exp_bbox) pred_bbox = self.convertDefaultAnnotToCoord(pred_bbox) # exp_bbox =", "* self.HEIGHT x = (yolo_annot[0] * self.WIDTH) - (w/2) y", "class JriekeBboxDataset: def generate(self): print('Generating...') self.WIDTH = 8 self.HEIGHT =", "plt.annotate('IOU: {:.2f}'.format(self.IOU(pred_bbox, exp_bbox)), (pred_bbox[0], pred_bbox[1]+pred_bbox[3]+0.2), color='r') if not legend_plotted: legend_plotted", "= annot[1] * self.HEIGHT return [x,y,w,h] def convertYoloAnnotToCoord(self, yolo_annot): '''", "not legend_plotted: legend_plotted = True plt.gca().legend(['Pred','GT'],loc='upper center', bbox_to_anchor=(0.5, -0.5), fancybox=True)", "h_I U = w1 * h1 + w2 * h2", "{:.2f}'.format(self.IOU(pred_bbox, exp_bbox)), (pred_bbox[0], pred_bbox[1]+pred_bbox[3]+0.2), color='r') if not legend_plotted: legend_plotted =", "rectangle to 1 self.bboxes[i_img, i_object] = [x, y, w, h]", "0, self.HEIGHT]) for bbox in self.bboxes[i]: plt.gca().add_patch(matplotlib.patches.Rectangle((bbox[0], bbox[1]), bbox[2], bbox[3],", "bbox[1]), bbox[2], bbox[3], ec='r', fc='none')) plt.show() def check_dataset_image_compability(self, test_X_sample, test_imgs_sample):", "test. i = int(0.8 * num_imgs) train_X = X[:i] #80%", "img, bbox): fig = plt.figure() fig.suptitle('Plotting rectangle.') fig.subplots_adjust(top=0.85) plt.subplot(1, 1,", "= annot[2] * self.WIDTH h = annot[3] * self.HEIGHT x", "convertDefaultAnnotToCoord(self, annot): ''' annot -> [x, y, w, h] '''", "def convertYoloAnnotToCoord(self, yolo_annot): ''' yolo_annot -> [x, y, w, h]", "tf import datetime class JriekeBboxDataset: def generate(self): print('Generating...') self.WIDTH =", "i = int(0.8 * num_imgs) train_X = X[:i] #80% for", "fig = plt.figure() fig.suptitle('Plotting rectangle.') fig.subplots_adjust(top=0.85) plt.subplot(1, 1, 1) plt.imshow(img,", "y, w, h] as the area of intersection over the", "the test dataset. fig = plt.figure(figsize=(12, 3)) fig.subplots_adjust(top=0.85) fig.suptitle('Prediction demonstration.", "I = w_I * h_I U = w1 * h1", "(yolo_annot[0] * self.WIDTH) - (w/2) y = (yolo_annot[1] * self.HEIGHT)", "if the generated imgs match to the test_X slice image')", "max(y1, y2) if w_I <= 0 or h_I <= 0:", "test_y def check_dataset_image_compability(self, test_X_sample, test_imgs_sample): fig = plt.figure(figsize=(12, 3)) fig.suptitle('check", "convertYoloAnnotToCoord(self, yolo_annot): ''' yolo_annot -> [x, y, w, h] '''", "for training') plt.imshow(test_X_sample, cmap='Greys', interpolation='none', origin='lower', extent=[0, self.WIDTH, 0, self.HEIGHT])", "h] ''' w = annot[2] * self.WIDTH h = annot[3]", "[x,y,w,h] def convertYoloAnnotToCoord(self, yolo_annot): ''' yolo_annot -> [x, y, w,", "# Split training and test. 
i = int(0.8 * num_imgs)", "# Show a few images and predicted bounding boxes from", "/ U def convertDefaultAnnotToCoord(self, annot): ''' annot -> [x, y,", "* self.HEIGHT x = annot[0] * self.HEIGHT y = annot[1]", "fig.suptitle('Plotting rectangle.') fig.subplots_adjust(top=0.85) plt.subplot(1, 1, 1) plt.imshow(img, cmap='Greys', interpolation='none', origin='lower',", "self.WIDTH, 0, self.HEIGHT]) plt.show() print('compare:',TMP,test_imgs_sample) def IOU(self,bbox1, bbox2): '''Calculate overlap", "in range(num_objects): w, h = np.random.randint(min_object_size, max_object_size, size=2) x =", "image sample + GT') plt.imshow(self.imgs[i], cmap='Greys', interpolation='none', origin='lower', extent=[0, self.WIDTH,", "samples.') legend_plotted = False for i_subplot in range(1, 11): plt.subplot(1,", "is based on https://github.com/jrieke/shape-detection/ ''' import matplotlib.pyplot as plt import", "train_y, test_X, test_y def check_dataset_image_compability(self, test_X_sample, test_imgs_sample): fig = plt.figure(figsize=(12,", "for i_object in range(num_objects): w, h = np.random.randint(min_object_size, max_object_size, size=2)", "= X[:i] #80% for training test_X = X[i:] train_y =", "y, w, h] print(\"Shapes: imgs \", self.imgs.shape, \" bboxes \",", "two bounding boxes [x, y, w, h] as the area", "x2, y2, w2, h2 = bbox2[0], bbox2[1], bbox2[2], bbox2[3] w_I", "4 num_objects = 1 self.bboxes = np.zeros((num_imgs, num_objects, 4)) self.imgs", "3)) fig.suptitle('check if the generated imgs match to the test_X", "num_objects = 1 self.bboxes = np.zeros((num_imgs, num_objects, 4)) self.imgs =", "fig.subplots_adjust(top=0.85) fig.suptitle('Prediction demonstration. Random samples.') legend_plotted = False for i_subplot", "return [x,y,w,h] def convertYoloAnnotToCoord(self, yolo_annot): ''' yolo_annot -> [x, y,", "y = np.random.randint(0, self.HEIGHT - h) self.imgs[i_img, y:y+h, x:x+w] =", "= 1. # set rectangle to 1 self.bboxes[i_img, i_object] =", "as tf import datetime class JriekeBboxDataset: def generate(self): print('Generating...') self.WIDTH", "ec='r', fc='none')) plt.gca().legend(['GT']) plt.show() def plot_rectangle(self, img, bbox): fig =", "0, self.HEIGHT]) plt.subplot(1, 2, 2) plt.gca().set_title('Global image holder: used for", "self.HEIGHT y = annot[1] * self.HEIGHT return [x,y,w,h] def convertYoloAnnotToCoord(self,", "plt.subplot(1, 10, i_subplot) i = np.random.randint(len(pred_bboxes)) plt.imshow(self.test_imgs[i], cmap='Greys', interpolation='none', origin='lower',", "bbox1[3] x2, y2, w2, h2 = bbox2[0], bbox2[1], bbox2[2], bbox2[3]", "yolo_annot[2] * self.WIDTH h = yolo_annot[3] * self.HEIGHT x =", "50000 min_object_size = 1 max_object_size = 4 num_objects = 1", "num_objects, 4)) self.imgs = np.zeros((num_imgs, self.WIDTH, self.HEIGHT)) # set background", "plt.imshow(img, cmap='Greys', interpolation='none', origin='lower', extent=[0, self.WIDTH, 0, self.HEIGHT]) plt.gca().add_patch(matplotlib.patches.Rectangle((bbox[0], bbox[1]),", "match to the test_X slice image') fig.subplots_adjust(top=0.85) plt.subplot(1, 2, 1)", "plot_rectangle(self, img, bbox): fig = plt.figure() fig.suptitle('Plotting rectangle.') fig.subplots_adjust(top=0.85) plt.subplot(1,", "images and predicted bounding boxes from the test dataset. 
fig", "# exp_bbox = self.convertDefaultAnnotToCoord(exp_bbox) print('after convertion: pred',pred_bbox, 'gt',exp_bbox) plt.gca().add_patch(matplotlib.patches.Rectangle((pred_bbox[0], pred_bbox[1]),", "w2) - max(x1, x2) h_I = min(y1 + h1, y2", "self.HEIGHT)) # set background to 0 for i_img in range(num_imgs):", "num_imgs) train_X = X[:i] #80% for training test_X = X[i:]", "origin='lower', extent=[0, self.WIDTH, 0, self.HEIGHT]) plt.show() print('compare:',TMP,test_imgs_sample) def IOU(self,bbox1, bbox2):", "range(num_objects): w, h = np.random.randint(min_object_size, max_object_size, size=2) x = np.random.randint(0,", "h] as the area of intersection over the area of", "exp_bbox[1]), exp_bbox[2], exp_bbox[3], ec='b', fc='none')) plt.annotate('IOU: {:.2f}'.format(self.IOU(pred_bbox, exp_bbox)), (pred_bbox[0], pred_bbox[1]+pred_bbox[3]+0.2),", "+ w1, x2 + w2) - max(x1, x2) h_I =", "test_imgs_sample): fig = plt.figure(figsize=(12, 3)) fig.suptitle('check if the generated imgs", "cmap='Greys', interpolation='none', origin='lower', extent=[0, self.WIDTH, 0, self.HEIGHT]) for pred_bbox, exp_bbox", "show_generated(self, i=0): fig = plt.figure() fig.subplots_adjust(top=0.85) fig.suptitle('Generated image sample +", "in self.bboxes[i]: plt.gca().add_patch(matplotlib.patches.Rectangle((bbox[0], bbox[1]), bbox[2], bbox[3], ec='r', fc='none')) plt.gca().legend(['GT']) plt.show()", "self.HEIGHT]) for pred_bbox, exp_bbox in zip(pred_bboxes[i], self.test_bboxes[i]): # print('before convertion:", "of unity''' x1, y1, w1, h1 = bbox1[0], bbox1[1], bbox1[2],", "* self.WIDTH h = yolo_annot[3] * self.HEIGHT x = (yolo_annot[0]", "a few images and predicted bounding boxes from the test", "plt.gca().set_title('Returned by the dataset class: used for training') plt.imshow(test_X_sample, cmap='Greys',", "exp_bbox[3], ec='b', fc='none')) plt.annotate('IOU: {:.2f}'.format(self.IOU(pred_bbox, exp_bbox)), (pred_bbox[0], pred_bbox[1]+pred_bbox[3]+0.2), color='r') if", "4)) self.imgs = np.zeros((num_imgs, self.WIDTH, self.HEIGHT)) # set background to", "\" bboxes \", self.bboxes.shape) #why this? 
# X = (self.imgs.reshape(num_imgs,", "int(0.8 * num_imgs) train_X = X[:i] #80% for training test_X", "num_imgs = 50000 min_object_size = 1 max_object_size = 4 num_objects", "# set rectangle to 1 self.bboxes[i_img, i_object] = [x, y,", "import datetime class JriekeBboxDataset: def generate(self): print('Generating...') self.WIDTH = 8", "max_object_size, size=2) x = np.random.randint(0, self.WIDTH - w) y =", "as np import tensorflow as tf import datetime class JriekeBboxDataset:", "unity''' x1, y1, w1, h1 = bbox1[0], bbox1[1], bbox1[2], bbox1[3]", "origin='lower', extent=[0, self.WIDTH, 0, self.HEIGHT]) plt.gca().add_patch(matplotlib.patches.Rectangle((bbox[0], bbox[1]), bbox[2], bbox[3], ec='r',", "legend_plotted = False for i_subplot in range(1, 11): plt.subplot(1, 10,", "1 self.bboxes = np.zeros((num_imgs, num_objects, 4)) self.imgs = np.zeros((num_imgs, self.WIDTH,", "GT') plt.imshow(self.imgs[i], cmap='Greys', interpolation='none', origin='lower', extent=[0, self.WIDTH, 0, self.HEIGHT]) for", "def convertDefaultAnnotToCoord(self, annot): ''' annot -> [x, y, w, h]", "w, h = np.random.randint(min_object_size, max_object_size, size=2) x = np.random.randint(0, self.WIDTH", "the dataset class: used for training') plt.imshow(test_X_sample, cmap='Greys', interpolation='none', origin='lower',", "x = (yolo_annot[0] * self.WIDTH) - (w/2) y = (yolo_annot[1]", "1) plt.gca().set_title('Returned by the dataset class: used for training') plt.imshow(test_X_sample,", "2, 1) plt.gca().set_title('Returned by the dataset class: used for training')", "= [x, y, w, h] print(\"Shapes: imgs \", self.imgs.shape, \"", "plt.subplot(1, 2, 1) plt.gca().set_title('Returned by the dataset class: used for", "I return I / U def convertDefaultAnnotToCoord(self, annot): ''' annot", "I / U def convertDefaultAnnotToCoord(self, annot): ''' annot -> [x,", "plt.gca().legend(['GT']) plt.show() def plot_rectangle(self, img, bbox): fig = plt.figure() fig.suptitle('Plotting", "interpolation='none', origin='lower', extent=[0, self.WIDTH, 0, self.HEIGHT]) plt.show() print('compare:',test_X_sample,test_imgs_sample) def show_predicted(self,", "8 self.HEIGHT = 8 num_imgs = 50000 min_object_size = 1", "#why this? # X = (self.imgs.reshape(num_imgs, -1) - np.mean(self.imgs)) /", "self.WIDTH # Split training and test. i = int(0.8 *", "= min(y1 + h1, y2 + h2) - max(y1, y2)", "fc='none')) #gt plt.gca().add_patch(matplotlib.patches.Rectangle((exp_bbox[0], exp_bbox[1]), exp_bbox[2], exp_bbox[3], ec='b', fc='none')) plt.annotate('IOU: {:.2f}'.format(self.IOU(pred_bbox,", "= 8 self.HEIGHT = 8 num_imgs = 50000 min_object_size =", "to 0 for i_img in range(num_imgs): for i_object in range(num_objects):", "2, 2) plt.gca().set_title('Global image holder: used for plotting.') plt.imshow(test_imgs_sample, cmap='Greys',", "10, i_subplot) i = np.random.randint(len(pred_bboxes)) plt.imshow(self.test_imgs[i], cmap='Greys', interpolation='none', origin='lower', extent=[0,", "generate(self): print('Generating...') self.WIDTH = 8 self.HEIGHT = 8 num_imgs =", "cmap='Greys', interpolation='none', origin='lower', extent=[0, self.WIDTH, 0, self.HEIGHT]) plt.show() print('compare:',TMP,test_imgs_sample) def", "0, self.HEIGHT]) plt.show() print('compare:',TMP,test_imgs_sample) def IOU(self,bbox1, bbox2): '''Calculate overlap between", "zip(pred_bboxes[i], self.test_bboxes[i]): # print('before convertion: pred',pred_bbox, 'gt',exp_bbox) pred_bbox = self.convertDefaultAnnotToCoord(pred_bbox)", "3)) fig.subplots_adjust(top=0.85) fig.suptitle('Prediction demonstration. 
Random samples.') legend_plotted = False for", "= w_I * h_I U = w1 * h1 +", "self.WIDTH, 0, self.HEIGHT]) for pred_bbox, exp_bbox in zip(pred_bboxes[i], self.test_bboxes[i]): #", "np.zeros((num_imgs, self.WIDTH, self.HEIGHT)) # set background to 0 for i_img", "h2 - I return I / U def convertDefaultAnnotToCoord(self, annot):", "= 8 num_imgs = 50000 min_object_size = 1 max_object_size =", "area of unity''' x1, y1, w1, h1 = bbox1[0], bbox1[1],", "self.HEIGHT]) plt.show() print('compare:',TMP,test_imgs_sample) def IOU(self,bbox1, bbox2): '''Calculate overlap between two", "w1 * h1 + w2 * h2 - I return", "''' yolo_annot -> [x, y, w, h] ''' w =", "0 or h_I <= 0: # no overlap return 0.", "background to 0 for i_img in range(num_imgs): for i_object in", "+ h1, y2 + h2) - max(y1, y2) if w_I", "self.imgs = np.zeros((num_imgs, self.WIDTH, self.HEIGHT)) # set background to 0", "train_y = y[:i] test_y = y[i:] self.test_imgs = self.imgs[i:] self.test_bboxes", "intersection over the area of unity''' x1, y1, w1, h1", "= self.imgs y = self.bboxes.reshape(num_imgs, -1) / self.WIDTH # Split", "min(x1 + w1, x2 + w2) - max(x1, x2) h_I", "interpolation='none', origin='lower', extent=[0, self.WIDTH, 0, self.HEIGHT]) plt.subplot(1, 2, 2) plt.gca().set_title('Global", "plt.show() print('compare:',test_X_sample,test_imgs_sample) def show_predicted(self, pred_bboxes): # Show a few images", "plt.subplot(1, 1, 1) plt.imshow(img, cmap='Greys', interpolation='none', origin='lower', extent=[0, self.WIDTH, 0,", "y, w, h] ''' w = annot[2] * self.WIDTH h", "self.HEIGHT]) plt.subplot(1, 2, 2) plt.gca().set_title('Global image holder: used for plotting.')", "for training test_X = X[i:] train_y = y[:i] test_y =", "h = annot[3] * self.HEIGHT x = annot[0] * self.HEIGHT", "= 1 self.bboxes = np.zeros((num_imgs, num_objects, 4)) self.imgs = np.zeros((num_imgs,", "= np.zeros((num_imgs, num_objects, 4)) self.imgs = np.zeros((num_imgs, self.WIDTH, self.HEIGHT)) #", "fig.suptitle('check if the generated imgs match to the test_X slice", "predicted bounding boxes from the test dataset. 
fig = plt.figure(figsize=(12,", "boxes [x, y, w, h] as the area of intersection", "set background to 0 for i_img in range(num_imgs): for i_object", "h_I = min(y1 + h1, y2 + h2) - max(y1,", "self.test_bboxes[i]): # print('before convertion: pred',pred_bbox, 'gt',exp_bbox) pred_bbox = self.convertDefaultAnnotToCoord(pred_bbox) #", "dataset class: used for training') plt.imshow(test_X_sample, cmap='Greys', interpolation='none', origin='lower', extent=[0,", "pred',pred_bbox, 'gt',exp_bbox) plt.gca().add_patch(matplotlib.patches.Rectangle((pred_bbox[0], pred_bbox[1]), pred_bbox[2], pred_bbox[3], ec='r', fc='none')) #gt plt.gca().add_patch(matplotlib.patches.Rectangle((exp_bbox[0],", "h] ''' w = yolo_annot[2] * self.WIDTH h = yolo_annot[3]", "* self.HEIGHT return [x,y,w,h] def convertYoloAnnotToCoord(self, yolo_annot): ''' yolo_annot ->", "* self.HEIGHT y = annot[1] * self.HEIGHT return [x,y,w,h] def", "U def convertDefaultAnnotToCoord(self, annot): ''' annot -> [x, y, w,", "y = annot[1] * self.HEIGHT return [x,y,w,h] def convertYoloAnnotToCoord(self, yolo_annot):", "* self.HEIGHT) - (h/2) return [x,y,w,h] def show_generated(self, i=0): fig", "= (yolo_annot[1] * self.HEIGHT) - (h/2) return [x,y,w,h] def show_generated(self,", "origin='lower', extent=[0, self.WIDTH, 0, self.HEIGHT]) for bbox in self.bboxes[i]: plt.gca().add_patch(matplotlib.patches.Rectangle((bbox[0],", "return train_X, train_y, test_X, test_y def check_dataset_image_compability(self, test_X_sample, test_imgs_sample): fig", "0. I = w_I * h_I U = w1 *", "This code is based on https://github.com/jrieke/shape-detection/ ''' import matplotlib.pyplot as", "= plt.figure() fig.subplots_adjust(top=0.85) fig.suptitle('Generated image sample + GT') plt.imshow(self.imgs[i], cmap='Greys',", "class: used for training') plt.imshow(test_X_sample, cmap='Greys', interpolation='none', origin='lower', extent=[0, self.WIDTH,", "h2) - max(y1, y2) if w_I <= 0 or h_I", "= (yolo_annot[0] * self.WIDTH) - (w/2) y = (yolo_annot[1] *", "self.WIDTH - w) y = np.random.randint(0, self.HEIGHT - h) self.imgs[i_img,", "= y[:i] test_y = y[i:] self.test_imgs = self.imgs[i:] self.test_bboxes =", "fig = plt.figure() fig.subplots_adjust(top=0.85) fig.suptitle('Generated image sample + GT') plt.imshow(self.imgs[i],", "# set background to 0 for i_img in range(num_imgs): for", "https://github.com/jrieke/shape-detection/ ''' import matplotlib.pyplot as plt import matplotlib import numpy", "= self.bboxes[i:] return train_X, train_y, test_X, test_y def check_dataset_image_compability(self, test_X_sample,", "w) y = np.random.randint(0, self.HEIGHT - h) self.imgs[i_img, y:y+h, x:x+w]", "train_X, train_y, test_X, test_y def check_dataset_image_compability(self, test_X_sample, test_imgs_sample): fig =", "self.bboxes = np.zeros((num_imgs, num_objects, 4)) self.imgs = np.zeros((num_imgs, self.WIDTH, self.HEIGHT))", "''' w = annot[2] * self.WIDTH h = annot[3] *", "y[i:] self.test_imgs = self.imgs[i:] self.test_bboxes = self.bboxes[i:] return train_X, train_y,", "overlap between two bounding boxes [x, y, w, h] as", "<= 0: # no overlap return 0. I = w_I", "* num_imgs) train_X = X[:i] #80% for training test_X =", "no overlap return 0. 
I = w_I * h_I U", "= bbox2[0], bbox2[1], bbox2[2], bbox2[3] w_I = min(x1 + w1,", "bbox[1]), bbox[2], bbox[3], ec='r', fc='none')) plt.gca().legend(['GT']) plt.show() def plot_rectangle(self, img,", "exp_bbox)), (pred_bbox[0], pred_bbox[1]+pred_bbox[3]+0.2), color='r') if not legend_plotted: legend_plotted = True", "(pred_bbox[0], pred_bbox[1]+pred_bbox[3]+0.2), color='r') if not legend_plotted: legend_plotted = True plt.gca().legend(['Pred','GT'],loc='upper", "and predicted bounding boxes from the test dataset. fig =", "origin='lower', extent=[0, self.WIDTH, 0, self.HEIGHT]) plt.subplot(1, 2, 2) plt.gca().set_title('Global image", "= w1 * h1 + w2 * h2 - I", "in zip(pred_bboxes[i], self.test_bboxes[i]): # print('before convertion: pred',pred_bbox, 'gt',exp_bbox) pred_bbox =", "plt.show() print('compare:',TMP,test_imgs_sample) def IOU(self,bbox1, bbox2): '''Calculate overlap between two bounding", "boxes from the test dataset. fig = plt.figure(figsize=(12, 3)) fig.subplots_adjust(top=0.85)", "demonstration. Random samples.') legend_plotted = False for i_subplot in range(1,", "1 self.bboxes[i_img, i_object] = [x, y, w, h] print(\"Shapes: imgs", "area of intersection over the area of unity''' x1, y1,", "numpy as np import tensorflow as tf import datetime class", "return 0. I = w_I * h_I U = w1" ]
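
# ---------------------------------------------------------------------------
# Sanity check for the IOU math above -- a sketch with made-up boxes, not part
# of the original source. For two 2x2 boxes offset by one cell, the
# intersection is a 1x2 strip: I = 1 * 2 = 2, U = 4 + 4 - 2 = 6, IOU = 1/3.
if __name__ == '__main__':
    dataset = JriekeBboxDataset()
    box_a = [0, 0, 2, 2]  # [x, y, w, h]
    box_b = [1, 0, 2, 2]  # the same box shifted right by one cell
    assert abs(dataset.IOU(box_a, box_b) - 2 / 6) < 1e-9
    print('IOU check passed:', dataset.IOU(box_a, box_b))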
[ "KeyError: pass def decreaseRating(peer): decreaseAmount = 0.1 minRating = -1", "def increaseRating(peer): increaseAmount = 0.1 maxRating = 1 with knownNodesLock:", "= None): if dirName is None: dirName = state.appdata with", "oldestList = sorted(knownNodes[recAddrStream], key=lambda x: x['lastseen'])[:knownNodesTrimAmount] for oldest in oldestList:", "= 2000 def saveKnownNodes(dirName = None): if dirName is None:", "pass def trimKnownNodes(recAddrStream = 1): if len(knownNodes[recAddrStream]) < BMConfigParser().get(\"knownnodes\", \"maxnodes\"):", "import threading from bmconfigparser import BMConfigParser import state knownNodesLock =", "decreaseAmount = 0.1 minRating = -1 with knownNodesLock: for stream", "increaseAmount, maxRating) except KeyError: pass def decreaseRating(peer): decreaseAmount = 0.1", "pickle.dump(knownNodes, output) def increaseRating(peer): increaseAmount = 0.1 maxRating = 1", "is None: dirName = state.appdata with knownNodesLock: with open(dirName +", "= 0.1 minRating = -1 with knownNodesLock: for stream in", "decreaseAmount, minRating) except KeyError: pass def trimKnownNodes(recAddrStream = 1): if", "knownNodes[stream][peer][\"rating\"] = min(knownNodes[stream][peer][\"rating\"] + increaseAmount, maxRating) except KeyError: pass def", "knownNodes = {} knownNodesTrimAmount = 2000 def saveKnownNodes(dirName = None):", "from bmconfigparser import BMConfigParser import state knownNodesLock = threading.Lock() knownNodes", "for stream in knownNodes.keys(): try: knownNodes[stream][peer][\"rating\"] = max(knownNodes[stream][peer][\"rating\"] - decreaseAmount,", "sorted(knownNodes[recAddrStream], key=lambda x: x['lastseen'])[:knownNodesTrimAmount] for oldest in oldestList: del knownNodes[recAddrStream][oldest]", "output) def increaseRating(peer): increaseAmount = 0.1 maxRating = 1 with", "KeyError: pass def trimKnownNodes(recAddrStream = 1): if len(knownNodes[recAddrStream]) < BMConfigParser().get(\"knownnodes\",", "bmconfigparser import BMConfigParser import state knownNodesLock = threading.Lock() knownNodes =", "except KeyError: pass def decreaseRating(peer): decreaseAmount = 0.1 minRating =", "knownNodesLock: for stream in knownNodes.keys(): try: knownNodes[stream][peer][\"rating\"] = max(knownNodes[stream][peer][\"rating\"] -", "with open(dirName + 'knownnodes.dat', 'wb') as output: pickle.dump(knownNodes, output) def", "with knownNodesLock: for stream in knownNodes.keys(): try: knownNodes[stream][peer][\"rating\"] = max(knownNodes[stream][peer][\"rating\"]", "threading.Lock() knownNodes = {} knownNodesTrimAmount = 2000 def saveKnownNodes(dirName =", "in knownNodes.keys(): try: knownNodes[stream][peer][\"rating\"] = max(knownNodes[stream][peer][\"rating\"] - decreaseAmount, minRating) except", "+ 'knownnodes.dat', 'wb') as output: pickle.dump(knownNodes, output) def increaseRating(peer): increaseAmount", "knownNodesLock: for stream in knownNodes.keys(): try: knownNodes[stream][peer][\"rating\"] = min(knownNodes[stream][peer][\"rating\"] +", "0.1 minRating = -1 with knownNodesLock: for stream in knownNodes.keys():", "= min(knownNodes[stream][peer][\"rating\"] + increaseAmount, maxRating) except KeyError: pass def decreaseRating(peer):", "saveKnownNodes(dirName = None): if dirName is None: dirName = state.appdata", "= sorted(knownNodes[recAddrStream], key=lambda x: x['lastseen'])[:knownNodesTrimAmount] for oldest in oldestList: del", "maxRating = 1 with knownNodesLock: for stream in knownNodes.keys(): try:", "- decreaseAmount, minRating) except KeyError: pass def 
trimKnownNodes(recAddrStream = 1):", "\"maxnodes\"): return with knownNodesLock: oldestList = sorted(knownNodes[recAddrStream], key=lambda x: x['lastseen'])[:knownNodesTrimAmount]", "for stream in knownNodes.keys(): try: knownNodes[stream][peer][\"rating\"] = min(knownNodes[stream][peer][\"rating\"] + increaseAmount,", "2000 def saveKnownNodes(dirName = None): if dirName is None: dirName", "in knownNodes.keys(): try: knownNodes[stream][peer][\"rating\"] = min(knownNodes[stream][peer][\"rating\"] + increaseAmount, maxRating) except", "knownNodesTrimAmount = 2000 def saveKnownNodes(dirName = None): if dirName is", "open(dirName + 'knownnodes.dat', 'wb') as output: pickle.dump(knownNodes, output) def increaseRating(peer):", "except KeyError: pass def trimKnownNodes(recAddrStream = 1): if len(knownNodes[recAddrStream]) <", "'knownnodes.dat', 'wb') as output: pickle.dump(knownNodes, output) def increaseRating(peer): increaseAmount =", "knownNodesLock: oldestList = sorted(knownNodes[recAddrStream], key=lambda x: x['lastseen'])[:knownNodesTrimAmount] for oldest in", "with knownNodesLock: with open(dirName + 'knownnodes.dat', 'wb') as output: pickle.dump(knownNodes,", "knownNodes.keys(): try: knownNodes[stream][peer][\"rating\"] = max(knownNodes[stream][peer][\"rating\"] - decreaseAmount, minRating) except KeyError:", "1 with knownNodesLock: for stream in knownNodes.keys(): try: knownNodes[stream][peer][\"rating\"] =", "= {} knownNodesTrimAmount = 2000 def saveKnownNodes(dirName = None): if", "import BMConfigParser import state knownNodesLock = threading.Lock() knownNodes = {}", "state.appdata with knownNodesLock: with open(dirName + 'knownnodes.dat', 'wb') as output:", "minRating = -1 with knownNodesLock: for stream in knownNodes.keys(): try:", "min(knownNodes[stream][peer][\"rating\"] + increaseAmount, maxRating) except KeyError: pass def decreaseRating(peer): decreaseAmount", "= -1 with knownNodesLock: for stream in knownNodes.keys(): try: knownNodes[stream][peer][\"rating\"]", "len(knownNodes[recAddrStream]) < BMConfigParser().get(\"knownnodes\", \"maxnodes\"): return with knownNodesLock: oldestList = sorted(knownNodes[recAddrStream],", "import state knownNodesLock = threading.Lock() knownNodes = {} knownNodesTrimAmount =", "pickle import threading from bmconfigparser import BMConfigParser import state knownNodesLock", "BMConfigParser().get(\"knownnodes\", \"maxnodes\"): return with knownNodesLock: oldestList = sorted(knownNodes[recAddrStream], key=lambda x:", "< BMConfigParser().get(\"knownnodes\", \"maxnodes\"): return with knownNodesLock: oldestList = sorted(knownNodes[recAddrStream], key=lambda", "with knownNodesLock: oldestList = sorted(knownNodes[recAddrStream], key=lambda x: x['lastseen'])[:knownNodesTrimAmount] for oldest", "= 1): if len(knownNodes[recAddrStream]) < BMConfigParser().get(\"knownnodes\", \"maxnodes\"): return with knownNodesLock:", "maxRating) except KeyError: pass def decreaseRating(peer): decreaseAmount = 0.1 minRating", "state knownNodesLock = threading.Lock() knownNodes = {} knownNodesTrimAmount = 2000", "dirName is None: dirName = state.appdata with knownNodesLock: with open(dirName", "knownNodes[stream][peer][\"rating\"] = max(knownNodes[stream][peer][\"rating\"] - decreaseAmount, minRating) except KeyError: pass def", "knownNodes.keys(): try: knownNodes[stream][peer][\"rating\"] = min(knownNodes[stream][peer][\"rating\"] + increaseAmount, maxRating) except KeyError:", "stream in knownNodes.keys(): try: knownNodes[stream][peer][\"rating\"] = 
min(knownNodes[stream][peer][\"rating\"] + increaseAmount, maxRating)", "{} knownNodesTrimAmount = 2000 def saveKnownNodes(dirName = None): if dirName", "if dirName is None: dirName = state.appdata with knownNodesLock: with", "return with knownNodesLock: oldestList = sorted(knownNodes[recAddrStream], key=lambda x: x['lastseen'])[:knownNodesTrimAmount] for", "-1 with knownNodesLock: for stream in knownNodes.keys(): try: knownNodes[stream][peer][\"rating\"] =", "= max(knownNodes[stream][peer][\"rating\"] - decreaseAmount, minRating) except KeyError: pass def trimKnownNodes(recAddrStream", "threading from bmconfigparser import BMConfigParser import state knownNodesLock = threading.Lock()", "dirName = state.appdata with knownNodesLock: with open(dirName + 'knownnodes.dat', 'wb')", "pass def decreaseRating(peer): decreaseAmount = 0.1 minRating = -1 with", "= state.appdata with knownNodesLock: with open(dirName + 'knownnodes.dat', 'wb') as", "= 0.1 maxRating = 1 with knownNodesLock: for stream in", "BMConfigParser import state knownNodesLock = threading.Lock() knownNodes = {} knownNodesTrimAmount", "def saveKnownNodes(dirName = None): if dirName is None: dirName =", "with knownNodesLock: for stream in knownNodes.keys(): try: knownNodes[stream][peer][\"rating\"] = min(knownNodes[stream][peer][\"rating\"]", "def decreaseRating(peer): decreaseAmount = 0.1 minRating = -1 with knownNodesLock:", "trimKnownNodes(recAddrStream = 1): if len(knownNodes[recAddrStream]) < BMConfigParser().get(\"knownnodes\", \"maxnodes\"): return with", "as output: pickle.dump(knownNodes, output) def increaseRating(peer): increaseAmount = 0.1 maxRating", "def trimKnownNodes(recAddrStream = 1): if len(knownNodes[recAddrStream]) < BMConfigParser().get(\"knownnodes\", \"maxnodes\"): return", "1): if len(knownNodes[recAddrStream]) < BMConfigParser().get(\"knownnodes\", \"maxnodes\"): return with knownNodesLock: oldestList", "import pickle import threading from bmconfigparser import BMConfigParser import state", "max(knownNodes[stream][peer][\"rating\"] - decreaseAmount, minRating) except KeyError: pass def trimKnownNodes(recAddrStream =", "try: knownNodes[stream][peer][\"rating\"] = min(knownNodes[stream][peer][\"rating\"] + increaseAmount, maxRating) except KeyError: pass", "output: pickle.dump(knownNodes, output) def increaseRating(peer): increaseAmount = 0.1 maxRating =", "stream in knownNodes.keys(): try: knownNodes[stream][peer][\"rating\"] = max(knownNodes[stream][peer][\"rating\"] - decreaseAmount, minRating)", "knownNodesLock: with open(dirName + 'knownnodes.dat', 'wb') as output: pickle.dump(knownNodes, output)", "increaseRating(peer): increaseAmount = 0.1 maxRating = 1 with knownNodesLock: for", "minRating) except KeyError: pass def trimKnownNodes(recAddrStream = 1): if len(knownNodes[recAddrStream])", "None: dirName = state.appdata with knownNodesLock: with open(dirName + 'knownnodes.dat',", "None): if dirName is None: dirName = state.appdata with knownNodesLock:", "= threading.Lock() knownNodes = {} knownNodesTrimAmount = 2000 def saveKnownNodes(dirName", "knownNodesLock = threading.Lock() knownNodes = {} knownNodesTrimAmount = 2000 def", "0.1 maxRating = 1 with knownNodesLock: for stream in knownNodes.keys():", "'wb') as output: pickle.dump(knownNodes, output) def increaseRating(peer): increaseAmount = 0.1", "+ increaseAmount, maxRating) except KeyError: pass def decreaseRating(peer): decreaseAmount =", "= 1 with knownNodesLock: for stream in knownNodes.keys(): try: knownNodes[stream][peer][\"rating\"]", 
"decreaseRating(peer): decreaseAmount = 0.1 minRating = -1 with knownNodesLock: for", "try: knownNodes[stream][peer][\"rating\"] = max(knownNodes[stream][peer][\"rating\"] - decreaseAmount, minRating) except KeyError: pass", "increaseAmount = 0.1 maxRating = 1 with knownNodesLock: for stream", "if len(knownNodes[recAddrStream]) < BMConfigParser().get(\"knownnodes\", \"maxnodes\"): return with knownNodesLock: oldestList =" ]
[ "console_log from chroma_agent.device_plugins.action_runner import CallbackAfterResponse from chroma_agent.lib.pacemaker import PacemakerConfig def", "at_time]) console_log.info(\"Terminating\") os._exit(0) raise CallbackAfterResponse(None, _shutdown) def reboot_server(at_time=\"now\"): def _reboot():", "This will initiate a \"nice\" shutdown with a wall from", "def ssi(runlevel): # force a manual failover by failing a", "force a manual failover by failing a node AgentShell.try_run([\"sync\"]) AgentShell.try_run([\"sync\"])", "that it # doesn't treat it as an AWOL console_log.info(\"Rebooting", "governed by a MIT-style # license that can be found", "it as an AWOL console_log.info(\"Rebooting %s per a STONITH request\"", "shutdown anyhow. AgentShell.try_run([\"shutdown\", \"-r\", at_time]) console_log.info(\"Terminating\") os._exit(0) raise CallbackAfterResponse(None, _reboot)", "by a MIT-style # license that can be found in", "source code is governed by a MIT-style # license that", "wall from root, etc. AgentShell.try_run([\"shutdown\", \"-H\" if halt else \"-h\",", "\"-r\", at_time]) console_log.info(\"Terminating\") os._exit(0) raise CallbackAfterResponse(None, _reboot) ACTIONS = [reboot_server,", "TODO: signal that manager that a STONITH has been done", "CallbackAfterResponse from chroma_agent.lib.pacemaker import PacemakerConfig def ssi(runlevel): # force a", "os._exit(0) raise CallbackAfterResponse(None, _shutdown) def reboot_server(at_time=\"now\"): def _reboot(): console_log.info(\"Initiating server", "a STONITH request\" % node) p_cfg.get_node(node).fence_reboot() def shutdown_server(halt=True, at_time=\"now\"): def", "console_log.info(\"Rebooting %s per a STONITH request\" % node) p_cfg.get_node(node).fence_reboot() def", "os._exit(0) raise CallbackAfterResponse(None, _reboot) ACTIONS = [reboot_server, shutdown_server, fail_node, stonith]", "manager request\") # reboot(8) just calls shutdown anyhow. AgentShell.try_run([\"shutdown\", \"-r\",", "chroma_agent.device_plugins.action_runner import CallbackAfterResponse from chroma_agent.lib.pacemaker import PacemakerConfig def ssi(runlevel): #", "with a wall from root, etc. AgentShell.try_run([\"shutdown\", \"-H\" if halt", "etc. AgentShell.try_run([\"shutdown\", \"-H\" if halt else \"-h\", at_time]) console_log.info(\"Terminating\") os._exit(0)", "# Use of this source code is governed by a", "fail_node(): ssi(\"0\") def stonith(node): p_cfg = PacemakerConfig() # TODO: signal", "a STONITH has been done so that it # doesn't", "% node) p_cfg.get_node(node).fence_reboot() def shutdown_server(halt=True, at_time=\"now\"): def _shutdown(): console_log.info(\"Initiating server", "calls shutdown anyhow. AgentShell.try_run([\"shutdown\", \"-r\", at_time]) console_log.info(\"Terminating\") os._exit(0) raise CallbackAfterResponse(None,", "2018 DDN. All rights reserved. # Use of this source", "anyhow. AgentShell.try_run([\"shutdown\", \"-r\", at_time]) console_log.info(\"Terminating\") os._exit(0) raise CallbackAfterResponse(None, _reboot) ACTIONS", "_shutdown) def reboot_server(at_time=\"now\"): def _reboot(): console_log.info(\"Initiating server reboot per manager", "server reboot per manager request\") # reboot(8) just calls shutdown", "stonith(node): p_cfg = PacemakerConfig() # TODO: signal that manager that", "import PacemakerConfig def ssi(runlevel): # force a manual failover by", "can be found in the LICENSE file. 
import os from", "AgentShell.try_run([\"sync\"]) AgentShell.try_run([\"sync\"]) AgentShell.try_run([\"init\", runlevel]) def fail_node(): ssi(\"0\") def stonith(node): p_cfg", "has been done so that it # doesn't treat it", "AgentShell from chroma_agent.log import console_log from chroma_agent.device_plugins.action_runner import CallbackAfterResponse from", "MIT-style # license that can be found in the LICENSE", "from chroma_agent.lib.pacemaker import PacemakerConfig def ssi(runlevel): # force a manual", "reserved. # Use of this source code is governed by", "by failing a node AgentShell.try_run([\"sync\"]) AgentShell.try_run([\"sync\"]) AgentShell.try_run([\"init\", runlevel]) def fail_node():", "the LICENSE file. import os from chroma_agent.lib.shell import AgentShell from", "that can be found in the LICENSE file. import os", "chroma_agent.lib.pacemaker import PacemakerConfig def ssi(runlevel): # force a manual failover", "if halt else \"-h\", at_time]) console_log.info(\"Terminating\") os._exit(0) raise CallbackAfterResponse(None, _shutdown)", "request\") # reboot(8) just calls shutdown anyhow. AgentShell.try_run([\"shutdown\", \"-r\", at_time])", "it # doesn't treat it as an AWOL console_log.info(\"Rebooting %s", "(c) 2018 DDN. All rights reserved. # Use of this", "halt else \"-h\", at_time]) console_log.info(\"Terminating\") os._exit(0) raise CallbackAfterResponse(None, _shutdown) def", "\"-H\" if halt else \"-h\", at_time]) console_log.info(\"Terminating\") os._exit(0) raise CallbackAfterResponse(None,", "%s per a STONITH request\" % node) p_cfg.get_node(node).fence_reboot() def shutdown_server(halt=True,", "found in the LICENSE file. import os from chroma_agent.lib.shell import", "LICENSE file. import os from chroma_agent.lib.shell import AgentShell from chroma_agent.log", "file. import os from chroma_agent.lib.shell import AgentShell from chroma_agent.log import", "runlevel]) def fail_node(): ssi(\"0\") def stonith(node): p_cfg = PacemakerConfig() #", "of this source code is governed by a MIT-style #", "ssi(runlevel): # force a manual failover by failing a node", "# TODO: signal that manager that a STONITH has been", "been done so that it # doesn't treat it as", "shutdown_server(halt=True, at_time=\"now\"): def _shutdown(): console_log.info(\"Initiating server shutdown per manager request\")", "DDN. All rights reserved. # Use of this source code", "# reboot(8) just calls shutdown anyhow. AgentShell.try_run([\"shutdown\", \"-r\", at_time]) console_log.info(\"Terminating\")", "# This will initiate a \"nice\" shutdown with a wall", "that manager that a STONITH has been done so that", "_reboot(): console_log.info(\"Initiating server reboot per manager request\") # reboot(8) just", "this source code is governed by a MIT-style # license", "a MIT-style # license that can be found in the", "STONITH has been done so that it # doesn't treat", "manual failover by failing a node AgentShell.try_run([\"sync\"]) AgentShell.try_run([\"sync\"]) AgentShell.try_run([\"init\", runlevel])", "request\") # This will initiate a \"nice\" shutdown with a", "shutdown per manager request\") # This will initiate a \"nice\"", "a wall from root, etc. 
AgentShell.try_run([\"shutdown\", \"-H\" if halt else", "node AgentShell.try_run([\"sync\"]) AgentShell.try_run([\"sync\"]) AgentShell.try_run([\"init\", runlevel]) def fail_node(): ssi(\"0\") def stonith(node):", "ssi(\"0\") def stonith(node): p_cfg = PacemakerConfig() # TODO: signal that", "request\" % node) p_cfg.get_node(node).fence_reboot() def shutdown_server(halt=True, at_time=\"now\"): def _shutdown(): console_log.info(\"Initiating", "All rights reserved. # Use of this source code is", "AgentShell.try_run([\"shutdown\", \"-r\", at_time]) console_log.info(\"Terminating\") os._exit(0) raise CallbackAfterResponse(None, _reboot) ACTIONS =", "that a STONITH has been done so that it #", "console_log.info(\"Initiating server shutdown per manager request\") # This will initiate", "per manager request\") # reboot(8) just calls shutdown anyhow. AgentShell.try_run([\"shutdown\",", "per manager request\") # This will initiate a \"nice\" shutdown", "def _reboot(): console_log.info(\"Initiating server reboot per manager request\") # reboot(8)", "shutdown with a wall from root, etc. AgentShell.try_run([\"shutdown\", \"-H\" if", "signal that manager that a STONITH has been done so", "import os from chroma_agent.lib.shell import AgentShell from chroma_agent.log import console_log", "license that can be found in the LICENSE file. import", "def reboot_server(at_time=\"now\"): def _reboot(): console_log.info(\"Initiating server reboot per manager request\")", "at_time=\"now\"): def _shutdown(): console_log.info(\"Initiating server shutdown per manager request\") #", "treat it as an AWOL console_log.info(\"Rebooting %s per a STONITH", "will initiate a \"nice\" shutdown with a wall from root,", "console_log.info(\"Terminating\") os._exit(0) raise CallbackAfterResponse(None, _reboot) ACTIONS = [reboot_server, shutdown_server, fail_node,", "p_cfg.get_node(node).fence_reboot() def shutdown_server(halt=True, at_time=\"now\"): def _shutdown(): console_log.info(\"Initiating server shutdown per", "PacemakerConfig def ssi(runlevel): # force a manual failover by failing", "code is governed by a MIT-style # license that can", "chroma_agent.log import console_log from chroma_agent.device_plugins.action_runner import CallbackAfterResponse from chroma_agent.lib.pacemaker import", "done so that it # doesn't treat it as an", "failing a node AgentShell.try_run([\"sync\"]) AgentShell.try_run([\"sync\"]) AgentShell.try_run([\"init\", runlevel]) def fail_node(): ssi(\"0\")", "AgentShell.try_run([\"shutdown\", \"-H\" if halt else \"-h\", at_time]) console_log.info(\"Terminating\") os._exit(0) raise", "initiate a \"nice\" shutdown with a wall from root, etc.", "\"-h\", at_time]) console_log.info(\"Terminating\") os._exit(0) raise CallbackAfterResponse(None, _shutdown) def reboot_server(at_time=\"now\"): def", "import AgentShell from chroma_agent.log import console_log from chroma_agent.device_plugins.action_runner import CallbackAfterResponse", "be found in the LICENSE file. import os from chroma_agent.lib.shell", "# license that can be found in the LICENSE file.", "= PacemakerConfig() # TODO: signal that manager that a STONITH", "from chroma_agent.log import console_log from chroma_agent.device_plugins.action_runner import CallbackAfterResponse from chroma_agent.lib.pacemaker", "AgentShell.try_run([\"sync\"]) AgentShell.try_run([\"init\", runlevel]) def fail_node(): ssi(\"0\") def stonith(node): p_cfg =", "in the LICENSE file. import os from chroma_agent.lib.shell import AgentShell", "root, etc. 
AgentShell.try_run([\"shutdown\", \"-H\" if halt else \"-h\", at_time]) console_log.info(\"Terminating\")", "an AWOL console_log.info(\"Rebooting %s per a STONITH request\" % node)", "_shutdown(): console_log.info(\"Initiating server shutdown per manager request\") # This will", "reboot_server(at_time=\"now\"): def _reboot(): console_log.info(\"Initiating server reboot per manager request\") #", "console_log.info(\"Initiating server reboot per manager request\") # reboot(8) just calls", "def _shutdown(): console_log.info(\"Initiating server shutdown per manager request\") # This", "rights reserved. # Use of this source code is governed", "a node AgentShell.try_run([\"sync\"]) AgentShell.try_run([\"sync\"]) AgentShell.try_run([\"init\", runlevel]) def fail_node(): ssi(\"0\") def", "as an AWOL console_log.info(\"Rebooting %s per a STONITH request\" %", "reboot(8) just calls shutdown anyhow. AgentShell.try_run([\"shutdown\", \"-r\", at_time]) console_log.info(\"Terminating\") os._exit(0)", "def stonith(node): p_cfg = PacemakerConfig() # TODO: signal that manager", "Use of this source code is governed by a MIT-style", "PacemakerConfig() # TODO: signal that manager that a STONITH has", "import console_log from chroma_agent.device_plugins.action_runner import CallbackAfterResponse from chroma_agent.lib.pacemaker import PacemakerConfig", "p_cfg = PacemakerConfig() # TODO: signal that manager that a", "import CallbackAfterResponse from chroma_agent.lib.pacemaker import PacemakerConfig def ssi(runlevel): # force", "server shutdown per manager request\") # This will initiate a", "manager request\") # This will initiate a \"nice\" shutdown with", "os from chroma_agent.lib.shell import AgentShell from chroma_agent.log import console_log from", "else \"-h\", at_time]) console_log.info(\"Terminating\") os._exit(0) raise CallbackAfterResponse(None, _shutdown) def reboot_server(at_time=\"now\"):", "is governed by a MIT-style # license that can be", "a \"nice\" shutdown with a wall from root, etc. AgentShell.try_run([\"shutdown\",", "# force a manual failover by failing a node AgentShell.try_run([\"sync\"])", "manager that a STONITH has been done so that it", "AWOL console_log.info(\"Rebooting %s per a STONITH request\" % node) p_cfg.get_node(node).fence_reboot()", "def shutdown_server(halt=True, at_time=\"now\"): def _shutdown(): console_log.info(\"Initiating server shutdown per manager", "\"nice\" shutdown with a wall from root, etc. AgentShell.try_run([\"shutdown\", \"-H\"", "def fail_node(): ssi(\"0\") def stonith(node): p_cfg = PacemakerConfig() # TODO:", "so that it # doesn't treat it as an AWOL", "# doesn't treat it as an AWOL console_log.info(\"Rebooting %s per", "CallbackAfterResponse(None, _shutdown) def reboot_server(at_time=\"now\"): def _reboot(): console_log.info(\"Initiating server reboot per", "per a STONITH request\" % node) p_cfg.get_node(node).fence_reboot() def shutdown_server(halt=True, at_time=\"now\"):", "from root, etc. AgentShell.try_run([\"shutdown\", \"-H\" if halt else \"-h\", at_time])", "from chroma_agent.lib.shell import AgentShell from chroma_agent.log import console_log from chroma_agent.device_plugins.action_runner", "raise CallbackAfterResponse(None, _shutdown) def reboot_server(at_time=\"now\"): def _reboot(): console_log.info(\"Initiating server reboot", "just calls shutdown anyhow. 
AgentShell.try_run([\"shutdown\", \"-r\", at_time]) console_log.info(\"Terminating\") os._exit(0) raise", "at_time]) console_log.info(\"Terminating\") os._exit(0) raise CallbackAfterResponse(None, _reboot) ACTIONS = [reboot_server, shutdown_server,", "# Copyright (c) 2018 DDN. All rights reserved. # Use", "doesn't treat it as an AWOL console_log.info(\"Rebooting %s per a", "reboot per manager request\") # reboot(8) just calls shutdown anyhow.", "Copyright (c) 2018 DDN. All rights reserved. # Use of", "failover by failing a node AgentShell.try_run([\"sync\"]) AgentShell.try_run([\"sync\"]) AgentShell.try_run([\"init\", runlevel]) def", "node) p_cfg.get_node(node).fence_reboot() def shutdown_server(halt=True, at_time=\"now\"): def _shutdown(): console_log.info(\"Initiating server shutdown", "chroma_agent.lib.shell import AgentShell from chroma_agent.log import console_log from chroma_agent.device_plugins.action_runner import", "a manual failover by failing a node AgentShell.try_run([\"sync\"]) AgentShell.try_run([\"sync\"]) AgentShell.try_run([\"init\",", "from chroma_agent.device_plugins.action_runner import CallbackAfterResponse from chroma_agent.lib.pacemaker import PacemakerConfig def ssi(runlevel):", "AgentShell.try_run([\"init\", runlevel]) def fail_node(): ssi(\"0\") def stonith(node): p_cfg = PacemakerConfig()", "STONITH request\" % node) p_cfg.get_node(node).fence_reboot() def shutdown_server(halt=True, at_time=\"now\"): def _shutdown():", "console_log.info(\"Terminating\") os._exit(0) raise CallbackAfterResponse(None, _shutdown) def reboot_server(at_time=\"now\"): def _reboot(): console_log.info(\"Initiating" ]
[ "for all divisions. \"\"\" return geotypes.DivisionsDownloader @decorators.downloader def download_states(self): \"\"\"", "all divisions. \"\"\" return geotypes.DivisionsDownloader @decorators.downloader def download_states(self): \"\"\" Download", "in the provided state. \"\"\" return geotypes.StateLegislativeUpperDistrictsDownloader @decorators.downloader def download_state_legislative_lower_districts(self):", "all Census tracts in the provided state. \"\"\" return geotypes.TractsDownloader", "school districts. \"\"\" return geotypes.ElementarySchoolDistrictsDownloader @decorators.downloader def download_secondary_school_districts(self): \"\"\" Download", "lower legislative districts in the provided state. \"\"\" return geotypes.StateLegislativeLowerDistrictsDownloader", "GEOTYPE_LIST = ( \"nationwide\", \"regions\", \"divisions\", \"states\", \"congressional_districts\", \"state_legislative_upper_districts\", \"state_legislative_lower_districts\",", "legislative districts in the provided state. \"\"\" return geotypes.StateLegislativeUpperDistrictsDownloader @decorators.downloader", "return f\"https://censusreporter.org/tables/{self.RAW_TABLE_NAME}/\" # # Geotype downloaders # @decorators.downloader def download_nationwide(self):", "download_zctas(self): \"\"\" Download data for Zip Code Tabulation Areas \"\"\"", "] # Validate the years for year in self.years_to_download: if", "districts. \"\"\" return geotypes.CongressionalDistrictsDownloader @decorators.downloader def download_state_legislative_upper_districts(self): \"\"\" Download data", "geography type: {geo}\") # Run it try: dl() except NotImplementedError:", "to download and process tables from a Census API table.", "not dl or not callable(dl): raise NotImplementedError(f\"Invalid geography type: {geo}\")", "Census designated places. \"\"\" return geotypes.PlacesDownloader @decorators.downloader def download_urban_areas(self): \"\"\"", "Download data for Public Use Microdata Areas. \"\"\" return geotypes.PumasDownloader", "\"\"\" # Set the inputs self.CENSUS_API_KEY = os.getenv(\"CENSUS_API_KEY\", api_key) if", "for all counties. \"\"\" return geotypes.CountiesDownloader @decorators.downloader def download_places(self): \"\"\"", "for all Census designated places. \"\"\" return geotypes.PlacesDownloader @decorators.downloader def", "school districts. \"\"\" return geotypes.SecondarySchoolDistrictsDownloader def download_everything(self): \"\"\" Download 'em", "# Validate it if not dl or not callable(dl): raise", "Reporter page explaining the ACS table. \"\"\" return f\"https://censusreporter.org/tables/{self.RAW_TABLE_NAME}/\" #", "== \"all\": self.years_to_download = self.YEAR_LIST # If the user provides", "\"all\": self.years_to_download = self.YEAR_LIST # If the user provides a", "return geotypes.CsasDownloader @decorators.downloader def download_pumas(self): \"\"\" Download data for Public", "NotImplementedError(error_msg) # Set the data directories if data_dir: self.data_dir =", "@property def censusreporter_url(self): \"\"\" Returns the URL of the Census", "\"\"\" return geotypes.DivisionsDownloader @decorators.downloader def download_states(self): \"\"\" Download data for", "for all Congressional districts. \"\"\" return geotypes.CongressionalDistrictsDownloader @decorators.downloader def download_state_legislative_upper_districts(self):", "places. \"\"\" return geotypes.PlacesDownloader @decorators.downloader def download_urban_areas(self): \"\"\" Download data", "\"\"\" Download data for elementary school districts. 
\"\"\" return geotypes.ElementarySchoolDistrictsDownloader", "function dl = getattr(self, f\"download_{geo}\", None) # Validate it if", "from . import geotypes from . import decorators logger =", "self.raw_data_dir = self.data_dir.joinpath(\"raw\") self.processed_data_dir = self.data_dir.joinpath(\"processed\") # Make sure they", "2012, 2011, 2010, 2009 ] # All available geographies GEOTYPE_LIST", "geotypes.CongressionalDistrictsDownloader @decorators.downloader def download_state_legislative_upper_districts(self): \"\"\" Download data for all Census", "def download_msas(self): \"\"\" Download data for Metropolitian Statistical Areas. \"\"\"", "logger = logging.getLogger(__name__) class BaseTableConfig(object): \"\"\" Configures how to download", "for all urban areas \"\"\" return geotypes.UrbanAreasDownloader @decorators.downloader def download_msas(self):", "Download data for Zip Code Tabulation Areas \"\"\" return geotypes.ZctasDownloader", "= pathlib.Path(str(data_dir)) else: self.data_dir = self.PARENT_DIR.joinpath(\"data\") self.raw_data_dir = self.data_dir.joinpath(\"raw\") self.processed_data_dir", "return geotypes.CnectasDownloader @decorators.downloader def download_aiannh_homelands(self): \"\"\" Download data for American", "download_cnectas(self): \"\"\" Download data for combined New England cities and", "api_key) if not self.CENSUS_API_KEY: raise NotImplementedError(\"Census API key required. Pass", "data for all states. \"\"\" return geotypes.StatesDownloader @decorators.downloader def download_congressional_districts(self):", "return geotypes.ZctasDownloader @decorators.downloader def download_unified_school_districts(self): \"\"\" Download data for unified", "data for New England cities and towns. \"\"\" return geotypes.NectasDownloader", "logging.getLogger(__name__) class BaseTableConfig(object): \"\"\" Configures how to download and process", "\"\"\" return geotypes.NationwideDownloader @decorators.downloader def download_regions(self): \"\"\" Download data for", "for elementary school districts. \"\"\" return geotypes.ElementarySchoolDistrictsDownloader @decorators.downloader def download_secondary_school_districts(self):", "type: {geo}\") # Run it try: dl() except NotImplementedError: pass", "if not self.CENSUS_API_KEY: raise NotImplementedError(\"Census API key required. Pass it", "\"\"\" Configuration. \"\"\" # Set the inputs self.CENSUS_API_KEY = os.getenv(\"CENSUS_API_KEY\",", "areas \"\"\" return geotypes.UrbanAreasDownloader @decorators.downloader def download_msas(self): \"\"\" Download data", "cities and towns. \"\"\" return geotypes.CnectasDownloader @decorators.downloader def download_aiannh_homelands(self): \"\"\"", "provided state. \"\"\" return geotypes.StateLegislativeUpperDistrictsDownloader @decorators.downloader def download_state_legislative_lower_districts(self): \"\"\" Download", "self.data_dir.joinpath(\"processed\") # Make sure they exist if not self.data_dir.exists(): self.data_dir.mkdir()", "Statistical Areas. \"\"\" return geotypes.MsasDownloader @decorators.downloader def download_csas(self): \"\"\" Download", "cities and towns. \"\"\" return geotypes.NectasDownloader @decorators.downloader def download_cnectas(self): \"\"\"", "= (\"Data only available for the years\" f\"{self.YEAR_LIST[-1]}-{self.YEAR_LIST[0]}.\") raise NotImplementedError(error_msg)", "geotypes.ZctasDownloader @decorators.downloader def download_unified_school_districts(self): \"\"\" Download data for unified school", "give those then. 
elif isinstance(years, list): self.years_to_download = list(map(int, years))", "\"\"\" return f\"https://censusreporter.org/tables/{self.RAW_TABLE_NAME}/\" # # Geotype downloaders # @decorators.downloader def", "process tables from a Census API table. \"\"\" import os", "Statistical Areas. \"\"\" return geotypes.CsasDownloader @decorators.downloader def download_pumas(self): \"\"\" Download", "def download_cnectas(self): \"\"\" Download data for combined New England cities", "Download data for all divisions. \"\"\" return geotypes.DivisionsDownloader @decorators.downloader def", "import pathlib from . import geotypes from . import decorators", "Download data for unified school districts. \"\"\" return geotypes.UnifiedSchoolDistrictsDownloader @decorators.downloader", "data for secondary school districts. \"\"\" return geotypes.SecondarySchoolDistrictsDownloader def download_everything(self):", "return geotypes.CongressionalDistrictsDownloader @decorators.downloader def download_state_legislative_upper_districts(self): \"\"\" Download data for all", "download, defaulting to most recent year # # If they", "@decorators.downloader def download_csas(self): \"\"\" Download data for Combined Statistical Areas.", "= self.PARENT_DIR.joinpath(\"data\") self.raw_data_dir = self.data_dir.joinpath(\"raw\") self.processed_data_dir = self.data_dir.joinpath(\"processed\") # Make", "not self.data_dir.exists(): self.data_dir.mkdir() if not self.raw_data_dir.exists(): self.raw_data_dir.mkdir() if not self.processed_data_dir.exists():", "tracts in the provided state. \"\"\" return geotypes.TractsDownloader @decorators.downloader def", "\"\"\" THIS_DIR = pathlib.Path(__file__).parent PARENT_DIR = THIS_DIR.parent # All available", "self.YEAR_LIST # If the user provides a year give them", "provides a year give them that. elif isinstance(years, int): self.years_to_download", "f\"download_{geo}\", None) # Validate it if not dl or not", "years, give it to them. if years == \"all\": self.years_to_download", "designated places. \"\"\" return geotypes.PlacesDownloader @decorators.downloader def download_urban_areas(self): \"\"\" Download", "combined New England cities and towns. \"\"\" return geotypes.CnectasDownloader @decorators.downloader", "self.years_to_download = [years] # Or if they provide years as", "return geotypes.AiannhHomelandsDownloader @decorators.downloader def download_tracts(self): \"\"\" Download data for all", "/usr/bin/env python # -*- coding: utf-8 -* \"\"\" A base", "New England cities and towns. \"\"\" return geotypes.CnectasDownloader @decorators.downloader def", "@decorators.downloader def download_elementary_school_districts(self): \"\"\" Download data for elementary school districts.", "\"\"\" return geotypes.UrbanAreasDownloader @decorators.downloader def download_msas(self): \"\"\" Download data for", "they provided nothing, default to the latest year of data", "self.CENSUS_API_KEY: raise NotImplementedError(\"Census API key required. 
Pass it as the", "\"\"\" return geotypes.ZctasDownloader @decorators.downloader def download_unified_school_districts(self): \"\"\" Download data for", "return geotypes.UrbanAreasDownloader @decorators.downloader def download_msas(self): \"\"\" Download data for Metropolitian", "elif years is None: self.years_to_download = [max(self.YEAR_LIST), ] # Validate", "return geotypes.DivisionsDownloader @decorators.downloader def download_states(self): \"\"\" Download data for all", "\"nationwide\", \"regions\", \"divisions\", \"states\", \"congressional_districts\", \"state_legislative_upper_districts\", \"state_legislative_lower_districts\", \"counties\", \"places\", \"urban_areas\",", "THIS_DIR = pathlib.Path(__file__).parent PARENT_DIR = THIS_DIR.parent # All available years", "download_state_legislative_upper_districts(self): \"\"\" Download data for all Census upper legislative districts", "all urban areas \"\"\" return geotypes.UrbanAreasDownloader @decorators.downloader def download_msas(self): \"\"\"", ". import geotypes from . import decorators logger = logging.getLogger(__name__)", "for year in self.years_to_download: if year not in self.YEAR_LIST: error_msg", "#! /usr/bin/env python # -*- coding: utf-8 -* \"\"\" A", "self.years_to_download = list(map(int, years)) # If they provided nothing, default", "years\" f\"{self.YEAR_LIST[-1]}-{self.YEAR_LIST[0]}.\") raise NotImplementedError(error_msg) # Set the data directories if", "provided state. \"\"\" return geotypes.TractsDownloader @decorators.downloader def download_zctas(self): \"\"\" Download", "and process tables from a Census API table. \"\"\" import", "Set the inputs self.CENSUS_API_KEY = os.getenv(\"CENSUS_API_KEY\", api_key) if not self.CENSUS_API_KEY:", "\"\"\" Download data for Public Use Microdata Areas. \"\"\" return", "England cities and towns. \"\"\" return geotypes.CnectasDownloader @decorators.downloader def download_aiannh_homelands(self):", "force # # Allow custom years for data download, defaulting", "to them. if years == \"all\": self.years_to_download = self.YEAR_LIST #", "\"\"\" Download data for all Congressional districts. \"\"\" return geotypes.CongressionalDistrictsDownloader", "Census lower legislative districts in the provided state. \"\"\" return", "data for Combined Statistical Areas. \"\"\" return geotypes.CsasDownloader @decorators.downloader def", "list(map(int, years)) # If they provided nothing, default to the", "@decorators.downloader def download_state_legislative_lower_districts(self): \"\"\" Download data for all Census lower", "the first argument.\") self.source = source self.force = force #", "Allow custom years for data download, defaulting to most recent", "all Census upper legislative districts in the provided state. \"\"\"", "# Set the data directories if data_dir: self.data_dir = pathlib.Path(str(data_dir))", "Make sure they exist if not self.data_dir.exists(): self.data_dir.mkdir() if not", "Areas. \"\"\" return geotypes.PumasDownloader @decorators.downloader def download_nectas(self): \"\"\" Download data", "the years\" f\"{self.YEAR_LIST[-1]}-{self.YEAR_LIST[0]}.\") raise NotImplementedError(error_msg) # Set the data directories", "geotypes.MsasDownloader @decorators.downloader def download_csas(self): \"\"\" Download data for Combined Statistical", "return geotypes.StateLegislativeLowerDistrictsDownloader @decorators.downloader def download_counties(self): \"\"\" Download data for all", "NotImplementedError(\"Census API key required. 
Pass it as the first argument.\")", "download_places(self): \"\"\" Download data for all Census designated places. \"\"\"", "= pathlib.Path(__file__).parent PARENT_DIR = THIS_DIR.parent # All available years YEAR_LIST", "\"\"\" Download data for all Census upper legislative districts in", "api_key=None, source=\"acs5\", years=None, data_dir=None, force=False ): \"\"\" Configuration. \"\"\" #", "python # -*- coding: utf-8 -* \"\"\" A base class", "= source self.force = force # # Allow custom years", "download_elementary_school_districts(self): \"\"\" Download data for elementary school districts. \"\"\" return", "the ACS table. \"\"\" return f\"https://censusreporter.org/tables/{self.RAW_TABLE_NAME}/\" # # Geotype downloaders", "all Census lower legislative districts in the provided state. \"\"\"", "import decorators logger = logging.getLogger(__name__) class BaseTableConfig(object): \"\"\" Configures how", "2013, 2012, 2011, 2010, 2009 ] # All available geographies", "\"csas\", \"pumas\", \"nectas\", \"cnectas\", \"aiannh_homelands\", \"tracts\", \"zctas\", \"unified_school_districts\", \"elementary_school_districts\", \"secondary_school_districts\"", "downloaders # @decorators.downloader def download_nationwide(self): \"\"\" Download nationwide data. \"\"\"", "# Or if they provide years as a list, give", "data. \"\"\" return geotypes.NationwideDownloader @decorators.downloader def download_regions(self): \"\"\" Download data", "\"\"\" Download data for all divisions. \"\"\" return geotypes.DivisionsDownloader @decorators.downloader", "Public Use Microdata Areas. \"\"\" return geotypes.PumasDownloader @decorators.downloader def download_nectas(self):", "self.force = force # # Allow custom years for data", "Download data for all Census lower legislative districts in the", "from a Census API table. \"\"\" import os import logging", "ACS table. \"\"\" return f\"https://censusreporter.org/tables/{self.RAW_TABLE_NAME}/\" # # Geotype downloaders #", "@decorators.downloader def download_cnectas(self): \"\"\" Download data for combined New England", "[max(self.YEAR_LIST), ] # Validate the years for year in self.years_to_download:", "all states. \"\"\" return geotypes.StatesDownloader @decorators.downloader def download_congressional_districts(self): \"\"\" Download", "def censusreporter_url(self): \"\"\" Returns the URL of the Census Reporter", "Download data for all Census upper legislative districts in the", "all counties. \"\"\" return geotypes.CountiesDownloader @decorators.downloader def download_places(self): \"\"\" Download", "force=False ): \"\"\" Configuration. \"\"\" # Set the inputs self.CENSUS_API_KEY", "\"\"\" return geotypes.ElementarySchoolDistrictsDownloader @decorators.downloader def download_secondary_school_districts(self): \"\"\" Download data for", "data for elementary school districts. \"\"\" return geotypes.ElementarySchoolDistrictsDownloader @decorators.downloader def", "counties. \"\"\" return geotypes.CountiesDownloader @decorators.downloader def download_places(self): \"\"\" Download data", "that governs how to download and process tables from a", "they want all the years, give it to them. if", "If they want all the years, give it to them.", "data for all urban areas \"\"\" return geotypes.UrbanAreasDownloader @decorators.downloader def", "return geotypes.MsasDownloader @decorators.downloader def download_csas(self): \"\"\" Download data for Combined", "data for unified school districts. 
\"\"\" return geotypes.UnifiedSchoolDistrictsDownloader @decorators.downloader def", "__init__( self, api_key=None, source=\"acs5\", years=None, data_dir=None, force=False ): \"\"\" Configuration.", "def download_tracts(self): \"\"\" Download data for all Census tracts in", "data download, defaulting to most recent year # # If", "nothing, default to the latest year of data elif years", "PARENT_DIR = THIS_DIR.parent # All available years YEAR_LIST = [", "lands. \"\"\" return geotypes.AiannhHomelandsDownloader @decorators.downloader def download_tracts(self): \"\"\" Download data", "governs how to download and process tables from a Census", "self.processed_data_dir.exists(): self.processed_data_dir.mkdir() @property def censusreporter_url(self): \"\"\" Returns the URL of", "if year not in self.YEAR_LIST: error_msg = (\"Data only available", "districts in the provided state. \"\"\" return geotypes.StateLegislativeLowerDistrictsDownloader @decorators.downloader def", "def __init__( self, api_key=None, source=\"acs5\", years=None, data_dir=None, force=False ): \"\"\"", "Download data for all Census designated places. \"\"\" return geotypes.PlacesDownloader", "f\"{self.YEAR_LIST[-1]}-{self.YEAR_LIST[0]}.\") raise NotImplementedError(error_msg) # Set the data directories if data_dir:", "a Census API table. \"\"\" import os import logging import", "New England cities and towns. \"\"\" return geotypes.NectasDownloader @decorators.downloader def", "self.data_dir.mkdir() if not self.raw_data_dir.exists(): self.raw_data_dir.mkdir() if not self.processed_data_dir.exists(): self.processed_data_dir.mkdir() @property", "= self.data_dir.joinpath(\"processed\") # Make sure they exist if not self.data_dir.exists():", "Census tracts in the provided state. \"\"\" return geotypes.TractsDownloader @decorators.downloader", "that. elif isinstance(years, int): self.years_to_download = [years] # Or if", "self.years_to_download = self.YEAR_LIST # If the user provides a year", "Download data for Combined Statistical Areas. \"\"\" return geotypes.CsasDownloader @decorators.downloader", "pathlib from . import geotypes from . import decorators logger", "): \"\"\" Configuration. \"\"\" # Set the inputs self.CENSUS_API_KEY =", "latest year of data elif years is None: self.years_to_download =", "download_state_legislative_lower_districts(self): \"\"\" Download data for all Census lower legislative districts", "\"\"\" return geotypes.CountiesDownloader @decorators.downloader def download_places(self): \"\"\" Download data for", "tables from the Census API. \"\"\" THIS_DIR = pathlib.Path(__file__).parent PARENT_DIR", "\"divisions\", \"states\", \"congressional_districts\", \"state_legislative_upper_districts\", \"state_legislative_lower_districts\", \"counties\", \"places\", \"urban_areas\", \"msas\", \"csas\",", "geotypes.CountiesDownloader @decorators.downloader def download_places(self): \"\"\" Download data for all Census", "None) # Validate it if not dl or not callable(dl):", "def download_counties(self): \"\"\" Download data for all counties. \"\"\" return", "it to them. if years == \"all\": self.years_to_download = self.YEAR_LIST", "\"secondary_school_districts\" ) def __init__( self, api_key=None, source=\"acs5\", years=None, data_dir=None, force=False", "not self.raw_data_dir.exists(): self.raw_data_dir.mkdir() if not self.processed_data_dir.exists(): self.processed_data_dir.mkdir() @property def censusreporter_url(self):", "key required. 
Pass it as the first argument.\") self.source =", "If they provided nothing, default to the latest year of", "Validate the years for year in self.years_to_download: if year not", "a year give them that. elif isinstance(years, int): self.years_to_download =", "def download_divisions(self): \"\"\" Download data for all divisions. \"\"\" return", "years for year in self.years_to_download: if year not in self.YEAR_LIST:", "then. elif isinstance(years, list): self.years_to_download = list(map(int, years)) # If", "geotypes.DivisionsDownloader @decorators.downloader def download_states(self): \"\"\" Download data for all states.", "states. \"\"\" return geotypes.StatesDownloader @decorators.downloader def download_congressional_districts(self): \"\"\" Download data", "return geotypes.PlacesDownloader @decorators.downloader def download_urban_areas(self): \"\"\" Download data for all", "@decorators.downloader def download_regions(self): \"\"\" Download data for all regions. \"\"\"", "to download and process tables from the Census API. \"\"\"", "data for all regions. \"\"\" return geotypes.RegionsDownloader @decorators.downloader def download_divisions(self):", "towns. \"\"\" return geotypes.CnectasDownloader @decorators.downloader def download_aiannh_homelands(self): \"\"\" Download data", "source=\"acs5\", years=None, data_dir=None, force=False ): \"\"\" Configuration. \"\"\" # Set", "if not dl or not callable(dl): raise NotImplementedError(f\"Invalid geography type:", "self.raw_data_dir.exists(): self.raw_data_dir.mkdir() if not self.processed_data_dir.exists(): self.processed_data_dir.mkdir() @property def censusreporter_url(self): \"\"\"", "download_aiannh_homelands(self): \"\"\" Download data for American Indian home lands. \"\"\"", "\"urban_areas\", \"msas\", \"csas\", \"pumas\", \"nectas\", \"cnectas\", \"aiannh_homelands\", \"tracts\", \"zctas\", \"unified_school_districts\",", "for all regions. \"\"\" return geotypes.RegionsDownloader @decorators.downloader def download_divisions(self): \"\"\"", "Configures how to download and process tables from the Census", "\"\"\" Download data for all states. \"\"\" return geotypes.StatesDownloader @decorators.downloader", "for all Census tracts in the provided state. \"\"\" return", "they exist if not self.data_dir.exists(): self.data_dir.mkdir() if not self.raw_data_dir.exists(): self.raw_data_dir.mkdir()", "Areas. \"\"\" return geotypes.MsasDownloader @decorators.downloader def download_csas(self): \"\"\" Download data", "for combined New England cities and towns. \"\"\" return geotypes.CnectasDownloader", "the inputs self.CENSUS_API_KEY = os.getenv(\"CENSUS_API_KEY\", api_key) if not self.CENSUS_API_KEY: raise", "download_msas(self): \"\"\" Download data for Metropolitian Statistical Areas. \"\"\" return", "them. if years == \"all\": self.years_to_download = self.YEAR_LIST # If", "process tables from the Census API. \"\"\" THIS_DIR = pathlib.Path(__file__).parent", "error_msg = (\"Data only available for the years\" f\"{self.YEAR_LIST[-1]}-{self.YEAR_LIST[0]}.\") raise", "def download_regions(self): \"\"\" Download data for all regions. 
\"\"\" return", "@decorators.downloader def download_congressional_districts(self): \"\"\" Download data for all Congressional districts.", "censusreporter_url(self): \"\"\" Returns the URL of the Census Reporter page", "\"zctas\", \"unified_school_districts\", \"elementary_school_districts\", \"secondary_school_districts\" ) def __init__( self, api_key=None, source=\"acs5\",", "geotypes.AiannhHomelandsDownloader @decorators.downloader def download_tracts(self): \"\"\" Download data for all Census", "\"congressional_districts\", \"state_legislative_upper_districts\", \"state_legislative_lower_districts\", \"counties\", \"places\", \"urban_areas\", \"msas\", \"csas\", \"pumas\", \"nectas\",", "as a list, give those then. elif isinstance(years, list): self.years_to_download", "Download data for all regions. \"\"\" return geotypes.RegionsDownloader @decorators.downloader def", "pathlib.Path(__file__).parent PARENT_DIR = THIS_DIR.parent # All available years YEAR_LIST =", "all regions. \"\"\" return geotypes.RegionsDownloader @decorators.downloader def download_divisions(self): \"\"\" Download", "\"\"\" return geotypes.SecondarySchoolDistrictsDownloader def download_everything(self): \"\"\" Download 'em all. \"\"\"", "return geotypes.CountiesDownloader @decorators.downloader def download_places(self): \"\"\" Download data for all", "years for data download, defaulting to most recent year #", "API. \"\"\" THIS_DIR = pathlib.Path(__file__).parent PARENT_DIR = THIS_DIR.parent # All", "self.processed_data_dir = self.data_dir.joinpath(\"processed\") # Make sure they exist if not", "Download data for all counties. \"\"\" return geotypes.CountiesDownloader @decorators.downloader def", "data for all divisions. \"\"\" return geotypes.DivisionsDownloader @decorators.downloader def download_states(self):", "def download_everything(self): \"\"\" Download 'em all. \"\"\" for geo in", "Download data for combined New England cities and towns. \"\"\"", "year not in self.YEAR_LIST: error_msg = (\"Data only available for", "# # Allow custom years for data download, defaulting to", "# # Geotype downloaders # @decorators.downloader def download_nationwide(self): \"\"\" Download", "def download_pumas(self): \"\"\" Download data for Public Use Microdata Areas.", "data elif years is None: self.years_to_download = [max(self.YEAR_LIST), ] #", "the latest year of data elif years is None: self.years_to_download", "Get the downloader function dl = getattr(self, f\"download_{geo}\", None) #", "os.getenv(\"CENSUS_API_KEY\", api_key) if not self.CENSUS_API_KEY: raise NotImplementedError(\"Census API key required.", "@decorators.downloader def download_nationwide(self): \"\"\" Download nationwide data. \"\"\" return geotypes.NationwideDownloader", "Download data for Metropolitian Statistical Areas. \"\"\" return geotypes.MsasDownloader @decorators.downloader", "they provide years as a list, give those then. elif", "user provides a year give them that. elif isinstance(years, int):", "return geotypes.UnifiedSchoolDistrictsDownloader @decorators.downloader def download_elementary_school_districts(self): \"\"\" Download data for elementary", "of the Census Reporter page explaining the ACS table. \"\"\"", "provided state. 
\"\"\" return geotypes.StateLegislativeLowerDistrictsDownloader @decorators.downloader def download_counties(self): \"\"\" Download", "\"\"\" Download data for combined New England cities and towns.", "# If they want all the years, give it to", "\"counties\", \"places\", \"urban_areas\", \"msas\", \"csas\", \"pumas\", \"nectas\", \"cnectas\", \"aiannh_homelands\", \"tracts\",", "def download_places(self): \"\"\" Download data for all Census designated places.", "if years == \"all\": self.years_to_download = self.YEAR_LIST # If the", "2009 ] # All available geographies GEOTYPE_LIST = ( \"nationwide\",", "[years] # Or if they provide years as a list,", "def download_zctas(self): \"\"\" Download data for Zip Code Tabulation Areas", "geotypes.ElementarySchoolDistrictsDownloader @decorators.downloader def download_secondary_school_districts(self): \"\"\" Download data for secondary school", "is None: self.years_to_download = [max(self.YEAR_LIST), ] # Validate the years", "for American Indian home lands. \"\"\" return geotypes.AiannhHomelandsDownloader @decorators.downloader def", "import os import logging import pathlib from . import geotypes", "= list(map(int, years)) # If they provided nothing, default to", "\"\"\" return geotypes.PlacesDownloader @decorators.downloader def download_urban_areas(self): \"\"\" Download data for", "if data_dir: self.data_dir = pathlib.Path(str(data_dir)) else: self.data_dir = self.PARENT_DIR.joinpath(\"data\") self.raw_data_dir", "self.data_dir = self.PARENT_DIR.joinpath(\"data\") self.raw_data_dir = self.data_dir.joinpath(\"raw\") self.processed_data_dir = self.data_dir.joinpath(\"processed\") #", "\"\"\" return geotypes.UnifiedSchoolDistrictsDownloader @decorators.downloader def download_elementary_school_districts(self): \"\"\" Download data for", "provided nothing, default to the latest year of data elif", "elif isinstance(years, int): self.years_to_download = [years] # Or if they", "Download 'em all. \"\"\" for geo in self.GEOTYPE_LIST: print(geo) #", "= logging.getLogger(__name__) class BaseTableConfig(object): \"\"\" Configures how to download and", "# # If they want all the years, give it", "def download_unified_school_districts(self): \"\"\" Download data for unified school districts. \"\"\"", "self.data_dir.exists(): self.data_dir.mkdir() if not self.raw_data_dir.exists(): self.raw_data_dir.mkdir() if not self.processed_data_dir.exists(): self.processed_data_dir.mkdir()", "\"\"\" for geo in self.GEOTYPE_LIST: print(geo) # Get the downloader", "for the years\" f\"{self.YEAR_LIST[-1]}-{self.YEAR_LIST[0]}.\") raise NotImplementedError(error_msg) # Set the data", "BaseTableConfig(object): \"\"\" Configures how to download and process tables from", "URL of the Census Reporter page explaining the ACS table.", "class that governs how to download and process tables from", "def download_urban_areas(self): \"\"\" Download data for all urban areas \"\"\"", "not self.CENSUS_API_KEY: raise NotImplementedError(\"Census API key required. Pass it as", "Download data for New England cities and towns. \"\"\" return", "geotypes.StateLegislativeLowerDistrictsDownloader @decorators.downloader def download_counties(self): \"\"\" Download data for all counties.", "self.GEOTYPE_LIST: print(geo) # Get the downloader function dl = getattr(self,", "If the user provides a year give them that. 
elif", "YEAR_LIST = [ 2017, 2016, 2015, 2014, 2013, 2012, 2011,", "available years YEAR_LIST = [ 2017, 2016, 2015, 2014, 2013,", "Download data for all urban areas \"\"\" return geotypes.UrbanAreasDownloader @decorators.downloader", "decorators logger = logging.getLogger(__name__) class BaseTableConfig(object): \"\"\" Configures how to", "2016, 2015, 2014, 2013, 2012, 2011, 2010, 2009 ] #", "\"\"\" Download data for unified school districts. \"\"\" return geotypes.UnifiedSchoolDistrictsDownloader", "for data download, defaulting to most recent year # #", "\"\"\" Download data for Metropolitian Statistical Areas. \"\"\" return geotypes.MsasDownloader", "@decorators.downloader def download_states(self): \"\"\" Download data for all states. \"\"\"", "return geotypes.NationwideDownloader @decorators.downloader def download_regions(self): \"\"\" Download data for all", "pathlib.Path(str(data_dir)) else: self.data_dir = self.PARENT_DIR.joinpath(\"data\") self.raw_data_dir = self.data_dir.joinpath(\"raw\") self.processed_data_dir =", "data_dir: self.data_dir = pathlib.Path(str(data_dir)) else: self.data_dir = self.PARENT_DIR.joinpath(\"data\") self.raw_data_dir =", "def download_nationwide(self): \"\"\" Download nationwide data. \"\"\" return geotypes.NationwideDownloader @decorators.downloader", "the Census API. \"\"\" THIS_DIR = pathlib.Path(__file__).parent PARENT_DIR = THIS_DIR.parent", "A base class that governs how to download and process", "self.source = source self.force = force # # Allow custom", "of data elif years is None: self.years_to_download = [max(self.YEAR_LIST), ]", "= force # # Allow custom years for data download,", "return geotypes.ElementarySchoolDistrictsDownloader @decorators.downloader def download_secondary_school_districts(self): \"\"\" Download data for secondary", "self.YEAR_LIST: error_msg = (\"Data only available for the years\" f\"{self.YEAR_LIST[-1]}-{self.YEAR_LIST[0]}.\")", "year in self.years_to_download: if year not in self.YEAR_LIST: error_msg =", "(\"Data only available for the years\" f\"{self.YEAR_LIST[-1]}-{self.YEAR_LIST[0]}.\") raise NotImplementedError(error_msg) #", "Areas. \"\"\" return geotypes.CsasDownloader @decorators.downloader def download_pumas(self): \"\"\" Download data", "\"regions\", \"divisions\", \"states\", \"congressional_districts\", \"state_legislative_upper_districts\", \"state_legislative_lower_districts\", \"counties\", \"places\", \"urban_areas\", \"msas\",", "download_nectas(self): \"\"\" Download data for New England cities and towns.", "not callable(dl): raise NotImplementedError(f\"Invalid geography type: {geo}\") # Run it", "API key required. Pass it as the first argument.\") self.source", "and towns. \"\"\" return geotypes.NectasDownloader @decorators.downloader def download_cnectas(self): \"\"\" Download", "data for combined New England cities and towns. \"\"\" return", "download_secondary_school_districts(self): \"\"\" Download data for secondary school districts. \"\"\" return", "self.data_dir.joinpath(\"raw\") self.processed_data_dir = self.data_dir.joinpath(\"processed\") # Make sure they exist if", "\"\"\" return geotypes.StateLegislativeUpperDistrictsDownloader @decorators.downloader def download_state_legislative_lower_districts(self): \"\"\" Download data for", "geographies GEOTYPE_LIST = ( \"nationwide\", \"regions\", \"divisions\", \"states\", \"congressional_districts\", \"state_legislative_upper_districts\",", "download_csas(self): \"\"\" Download data for Combined Statistical Areas. 
\"\"\" return", "in self.GEOTYPE_LIST: print(geo) # Get the downloader function dl =", "data for all Census upper legislative districts in the provided", "Tabulation Areas \"\"\" return geotypes.ZctasDownloader @decorators.downloader def download_unified_school_districts(self): \"\"\" Download", "\"\"\" return geotypes.AiannhHomelandsDownloader @decorators.downloader def download_tracts(self): \"\"\" Download data for", "Census API. \"\"\" THIS_DIR = pathlib.Path(__file__).parent PARENT_DIR = THIS_DIR.parent #", "print(geo) # Get the downloader function dl = getattr(self, f\"download_{geo}\",", "\"\"\" return geotypes.RegionsDownloader @decorators.downloader def download_divisions(self): \"\"\" Download data for", "@decorators.downloader def download_counties(self): \"\"\" Download data for all counties. \"\"\"", "the URL of the Census Reporter page explaining the ACS", "\"\"\" Download data for American Indian home lands. \"\"\" return", "for all Census upper legislative districts in the provided state.", "Download data for American Indian home lands. \"\"\" return geotypes.AiannhHomelandsDownloader", "the provided state. \"\"\" return geotypes.StateLegislativeLowerDistrictsDownloader @decorators.downloader def download_counties(self): \"\"\"", "self.CENSUS_API_KEY = os.getenv(\"CENSUS_API_KEY\", api_key) if not self.CENSUS_API_KEY: raise NotImplementedError(\"Census API", "for all Census lower legislative districts in the provided state.", "from the Census API. \"\"\" THIS_DIR = pathlib.Path(__file__).parent PARENT_DIR =", "Zip Code Tabulation Areas \"\"\" return geotypes.ZctasDownloader @decorators.downloader def download_unified_school_districts(self):", "how to download and process tables from a Census API", "list): self.years_to_download = list(map(int, years)) # If they provided nothing,", "explaining the ACS table. \"\"\" return f\"https://censusreporter.org/tables/{self.RAW_TABLE_NAME}/\" # # Geotype", "in self.years_to_download: if year not in self.YEAR_LIST: error_msg = (\"Data", "download_nationwide(self): \"\"\" Download nationwide data. \"\"\" return geotypes.NationwideDownloader @decorators.downloader def", "in self.YEAR_LIST: error_msg = (\"Data only available for the years\"", "give it to them. if years == \"all\": self.years_to_download =", "\"\"\" Download data for New England cities and towns. \"\"\"", "@decorators.downloader def download_pumas(self): \"\"\" Download data for Public Use Microdata", "England cities and towns. \"\"\" return geotypes.NectasDownloader @decorators.downloader def download_cnectas(self):", "= ( \"nationwide\", \"regions\", \"divisions\", \"states\", \"congressional_districts\", \"state_legislative_upper_districts\", \"state_legislative_lower_districts\", \"counties\",", "page explaining the ACS table. 
\"\"\" return f\"https://censusreporter.org/tables/{self.RAW_TABLE_NAME}/\" # #", "as the first argument.\") self.source = source self.force = force", "\"pumas\", \"nectas\", \"cnectas\", \"aiannh_homelands\", \"tracts\", \"zctas\", \"unified_school_districts\", \"elementary_school_districts\", \"secondary_school_districts\" )", "getattr(self, f\"download_{geo}\", None) # Validate it if not dl or", "how to download and process tables from the Census API.", "for Zip Code Tabulation Areas \"\"\" return geotypes.ZctasDownloader @decorators.downloader def", "most recent year # # If they want all the", "isinstance(years, list): self.years_to_download = list(map(int, years)) # If they provided", "raise NotImplementedError(f\"Invalid geography type: {geo}\") # Run it try: dl()", "# All available years YEAR_LIST = [ 2017, 2016, 2015,", "logging import pathlib from . import geotypes from . import", "os import logging import pathlib from . import geotypes from", "@decorators.downloader def download_secondary_school_districts(self): \"\"\" Download data for secondary school districts.", "download_counties(self): \"\"\" Download data for all counties. \"\"\" return geotypes.CountiesDownloader", "return geotypes.SecondarySchoolDistrictsDownloader def download_everything(self): \"\"\" Download 'em all. \"\"\" for", "\"msas\", \"csas\", \"pumas\", \"nectas\", \"cnectas\", \"aiannh_homelands\", \"tracts\", \"zctas\", \"unified_school_districts\", \"elementary_school_districts\",", "for New England cities and towns. \"\"\" return geotypes.NectasDownloader @decorators.downloader", "if not self.processed_data_dir.exists(): self.processed_data_dir.mkdir() @property def censusreporter_url(self): \"\"\" Returns the", "# Get the downloader function dl = getattr(self, f\"download_{geo}\", None)", "2015, 2014, 2013, 2012, 2011, 2010, 2009 ] # All", "\"elementary_school_districts\", \"secondary_school_districts\" ) def __init__( self, api_key=None, source=\"acs5\", years=None, data_dir=None,", "years is None: self.years_to_download = [max(self.YEAR_LIST), ] # Validate the", "the Census Reporter page explaining the ACS table. \"\"\" return", "data for all Census lower legislative districts in the provided", "all. \"\"\" for geo in self.GEOTYPE_LIST: print(geo) # Get the", "download and process tables from a Census API table. \"\"\"", "All available geographies GEOTYPE_LIST = ( \"nationwide\", \"regions\", \"divisions\", \"states\",", "elif isinstance(years, list): self.years_to_download = list(map(int, years)) # If they", "Download nationwide data. \"\"\" return geotypes.NationwideDownloader @decorators.downloader def download_regions(self): \"\"\"", "\"\"\" return geotypes.StatesDownloader @decorators.downloader def download_congressional_districts(self): \"\"\" Download data for", "API table. \"\"\" import os import logging import pathlib from", "download_pumas(self): \"\"\" Download data for Public Use Microdata Areas. \"\"\"", "the provided state. \"\"\" return geotypes.TractsDownloader @decorators.downloader def download_zctas(self): \"\"\"", "elementary school districts. 
\"\"\" return geotypes.ElementarySchoolDistrictsDownloader @decorators.downloader def download_secondary_school_districts(self): \"\"\"", "= [max(self.YEAR_LIST), ] # Validate the years for year in", "\"state_legislative_lower_districts\", \"counties\", \"places\", \"urban_areas\", \"msas\", \"csas\", \"pumas\", \"nectas\", \"cnectas\", \"aiannh_homelands\",", "inputs self.CENSUS_API_KEY = os.getenv(\"CENSUS_API_KEY\", api_key) if not self.CENSUS_API_KEY: raise NotImplementedError(\"Census", "years)) # If they provided nothing, default to the latest", "geotypes.StatesDownloader @decorators.downloader def download_congressional_districts(self): \"\"\" Download data for all Congressional", "download_congressional_districts(self): \"\"\" Download data for all Congressional districts. \"\"\" return", "tables from a Census API table. \"\"\" import os import", "raise NotImplementedError(\"Census API key required. Pass it as the first", "\"\"\" Download data for all Census tracts in the provided", "= THIS_DIR.parent # All available years YEAR_LIST = [ 2017,", "2010, 2009 ] # All available geographies GEOTYPE_LIST = (", "Metropolitian Statistical Areas. \"\"\" return geotypes.MsasDownloader @decorators.downloader def download_csas(self): \"\"\"", "year of data elif years is None: self.years_to_download = [max(self.YEAR_LIST),", "for unified school districts. \"\"\" return geotypes.UnifiedSchoolDistrictsDownloader @decorators.downloader def download_elementary_school_districts(self):", "year # # If they want all the years, give", "for Public Use Microdata Areas. \"\"\" return geotypes.PumasDownloader @decorators.downloader def", "the years for year in self.years_to_download: if year not in", "def download_secondary_school_districts(self): \"\"\" Download data for secondary school districts. \"\"\"", "districts. \"\"\" return geotypes.SecondarySchoolDistrictsDownloader def download_everything(self): \"\"\" Download 'em all.", "from . import decorators logger = logging.getLogger(__name__) class BaseTableConfig(object): \"\"\"", "Census Reporter page explaining the ACS table. \"\"\" return f\"https://censusreporter.org/tables/{self.RAW_TABLE_NAME}/\"", "[ 2017, 2016, 2015, 2014, 2013, 2012, 2011, 2010, 2009", "@decorators.downloader def download_divisions(self): \"\"\" Download data for all divisions. \"\"\"", "data for Metropolitian Statistical Areas. \"\"\" return geotypes.MsasDownloader @decorators.downloader def", "dl = getattr(self, f\"download_{geo}\", None) # Validate it if not", "districts. \"\"\" return geotypes.UnifiedSchoolDistrictsDownloader @decorators.downloader def download_elementary_school_districts(self): \"\"\" Download data", "def download_state_legislative_upper_districts(self): \"\"\" Download data for all Census upper legislative", "Use Microdata Areas. \"\"\" return geotypes.PumasDownloader @decorators.downloader def download_nectas(self): \"\"\"", "def download_state_legislative_lower_districts(self): \"\"\" Download data for all Census lower legislative", "else: self.data_dir = self.PARENT_DIR.joinpath(\"data\") self.raw_data_dir = self.data_dir.joinpath(\"raw\") self.processed_data_dir = self.data_dir.joinpath(\"processed\")", "the years, give it to them. if years == \"all\":", "isinstance(years, int): self.years_to_download = [years] # Or if they provide", "# Geotype downloaders # @decorators.downloader def download_nationwide(self): \"\"\" Download nationwide", "year give them that. 
elif isinstance(years, int): self.years_to_download = [years]", "download_regions(self): \"\"\" Download data for all regions. \"\"\" return geotypes.RegionsDownloader", "data for Public Use Microdata Areas. \"\"\" return geotypes.PumasDownloader @decorators.downloader", "\"\"\" return geotypes.TractsDownloader @decorators.downloader def download_zctas(self): \"\"\" Download data for", "Congressional districts. \"\"\" return geotypes.CongressionalDistrictsDownloader @decorators.downloader def download_state_legislative_upper_districts(self): \"\"\" Download", "state. \"\"\" return geotypes.StateLegislativeUpperDistrictsDownloader @decorators.downloader def download_state_legislative_lower_districts(self): \"\"\" Download data", "downloader function dl = getattr(self, f\"download_{geo}\", None) # Validate it", "Validate it if not dl or not callable(dl): raise NotImplementedError(f\"Invalid", "not in self.YEAR_LIST: error_msg = (\"Data only available for the", "the user provides a year give them that. elif isinstance(years,", "custom years for data download, defaulting to most recent year", "\"\"\" return geotypes.PumasDownloader @decorators.downloader def download_nectas(self): \"\"\" Download data for", "\"\"\" return geotypes.CnectasDownloader @decorators.downloader def download_aiannh_homelands(self): \"\"\" Download data for", "dl or not callable(dl): raise NotImplementedError(f\"Invalid geography type: {geo}\") #", "defaulting to most recent year # # If they want", "@decorators.downloader def download_nectas(self): \"\"\" Download data for New England cities", "Download data for secondary school districts. \"\"\" return geotypes.SecondarySchoolDistrictsDownloader def", "class BaseTableConfig(object): \"\"\" Configures how to download and process tables", "available geographies GEOTYPE_LIST = ( \"nationwide\", \"regions\", \"divisions\", \"states\", \"congressional_districts\",", "legislative districts in the provided state. \"\"\" return geotypes.StateLegislativeLowerDistrictsDownloader @decorators.downloader", "@decorators.downloader def download_unified_school_districts(self): \"\"\" Download data for unified school districts.", "# -*- coding: utf-8 -* \"\"\" A base class that", "= getattr(self, f\"download_{geo}\", None) # Validate it if not dl", "self.years_to_download = [max(self.YEAR_LIST), ] # Validate the years for year", "data for all Congressional districts. \"\"\" return geotypes.CongressionalDistrictsDownloader @decorators.downloader def", "Pass it as the first argument.\") self.source = source self.force", "for Combined Statistical Areas. \"\"\" return geotypes.CsasDownloader @decorators.downloader def download_pumas(self):", "a list, give those then. elif isinstance(years, list): self.years_to_download =", "download_everything(self): \"\"\" Download 'em all. \"\"\" for geo in self.GEOTYPE_LIST:", "= self.data_dir.joinpath(\"raw\") self.processed_data_dir = self.data_dir.joinpath(\"processed\") # Make sure they exist", "provide years as a list, give those then. elif isinstance(years,", "Set the data directories if data_dir: self.data_dir = pathlib.Path(str(data_dir)) else:", "# Make sure they exist if not self.data_dir.exists(): self.data_dir.mkdir() if", "the provided state. 
\"\"\" return geotypes.StateLegislativeUpperDistrictsDownloader @decorators.downloader def download_state_legislative_lower_districts(self): \"\"\"", "sure they exist if not self.data_dir.exists(): self.data_dir.mkdir() if not self.raw_data_dir.exists():", "2011, 2010, 2009 ] # All available geographies GEOTYPE_LIST =", "@decorators.downloader def download_tracts(self): \"\"\" Download data for all Census tracts", "Microdata Areas. \"\"\" return geotypes.PumasDownloader @decorators.downloader def download_nectas(self): \"\"\" Download", "THIS_DIR.parent # All available years YEAR_LIST = [ 2017, 2016,", "Code Tabulation Areas \"\"\" return geotypes.ZctasDownloader @decorators.downloader def download_unified_school_districts(self): \"\"\"", "\"\"\" A base class that governs how to download and", "geotypes.NationwideDownloader @decorators.downloader def download_regions(self): \"\"\" Download data for all regions.", "school districts. \"\"\" return geotypes.UnifiedSchoolDistrictsDownloader @decorators.downloader def download_elementary_school_districts(self): \"\"\" Download", "geotypes.NectasDownloader @decorators.downloader def download_cnectas(self): \"\"\" Download data for combined New", "-*- coding: utf-8 -* \"\"\" A base class that governs", "unified school districts. \"\"\" return geotypes.UnifiedSchoolDistrictsDownloader @decorators.downloader def download_elementary_school_districts(self): \"\"\"", "import geotypes from . import decorators logger = logging.getLogger(__name__) class", "data for all Census designated places. \"\"\" return geotypes.PlacesDownloader @decorators.downloader", "import logging import pathlib from . import geotypes from .", "None: self.years_to_download = [max(self.YEAR_LIST), ] # Validate the years for", "not self.processed_data_dir.exists(): self.processed_data_dir.mkdir() @property def censusreporter_url(self): \"\"\" Returns the URL", "to most recent year # # If they want all", "\"\"\" Configures how to download and process tables from the", "years as a list, give those then. elif isinstance(years, list):", "# Validate the years for year in self.years_to_download: if year", "and towns. \"\"\" return geotypes.CnectasDownloader @decorators.downloader def download_aiannh_homelands(self): \"\"\" Download", "\"\"\" Download 'em all. \"\"\" for geo in self.GEOTYPE_LIST: print(geo)", "to the latest year of data elif years is None:", "@decorators.downloader def download_zctas(self): \"\"\" Download data for Zip Code Tabulation", "geo in self.GEOTYPE_LIST: print(geo) # Get the downloader function dl", "@decorators.downloader def download_aiannh_homelands(self): \"\"\" Download data for American Indian home", "return geotypes.TractsDownloader @decorators.downloader def download_zctas(self): \"\"\" Download data for Zip", "self.PARENT_DIR.joinpath(\"data\") self.raw_data_dir = self.data_dir.joinpath(\"raw\") self.processed_data_dir = self.data_dir.joinpath(\"processed\") # Make sure", "if not self.data_dir.exists(): self.data_dir.mkdir() if not self.raw_data_dir.exists(): self.raw_data_dir.mkdir() if not", "= os.getenv(\"CENSUS_API_KEY\", api_key) if not self.CENSUS_API_KEY: raise NotImplementedError(\"Census API key", "# If they provided nothing, default to the latest year", "Census API table. \"\"\" import os import logging import pathlib", "for geo in self.GEOTYPE_LIST: print(geo) # Get the downloader function", "utf-8 -* \"\"\" A base class that governs how to", "in the provided state. 
\"\"\" return geotypes.StateLegislativeLowerDistrictsDownloader @decorators.downloader def download_counties(self):", "Indian home lands. \"\"\" return geotypes.AiannhHomelandsDownloader @decorators.downloader def download_tracts(self): \"\"\"", "data for all counties. \"\"\" return geotypes.CountiesDownloader @decorators.downloader def download_places(self):", "\"state_legislative_upper_districts\", \"state_legislative_lower_districts\", \"counties\", \"places\", \"urban_areas\", \"msas\", \"csas\", \"pumas\", \"nectas\", \"cnectas\",", "self.raw_data_dir.mkdir() if not self.processed_data_dir.exists(): self.processed_data_dir.mkdir() @property def censusreporter_url(self): \"\"\" Returns", "geotypes.UnifiedSchoolDistrictsDownloader @decorators.downloader def download_elementary_school_districts(self): \"\"\" Download data for elementary school", "table. \"\"\" return f\"https://censusreporter.org/tables/{self.RAW_TABLE_NAME}/\" # # Geotype downloaders # @decorators.downloader", "2017, 2016, 2015, 2014, 2013, 2012, 2011, 2010, 2009 ]", "\"\"\" Download data for Combined Statistical Areas. \"\"\" return geotypes.CsasDownloader", "source self.force = force # # Allow custom years for", "upper legislative districts in the provided state. \"\"\" return geotypes.StateLegislativeUpperDistrictsDownloader", "geotypes.TractsDownloader @decorators.downloader def download_zctas(self): \"\"\" Download data for Zip Code", "\"\"\" return geotypes.NectasDownloader @decorators.downloader def download_cnectas(self): \"\"\" Download data for", "\"places\", \"urban_areas\", \"msas\", \"csas\", \"pumas\", \"nectas\", \"cnectas\", \"aiannh_homelands\", \"tracts\", \"zctas\",", "coding: utf-8 -* \"\"\" A base class that governs how", "return geotypes.PumasDownloader @decorators.downloader def download_nectas(self): \"\"\" Download data for New", "= self.YEAR_LIST # If the user provides a year give", "directories if data_dir: self.data_dir = pathlib.Path(str(data_dir)) else: self.data_dir = self.PARENT_DIR.joinpath(\"data\")", "# @decorators.downloader def download_nationwide(self): \"\"\" Download nationwide data. \"\"\" return", "raise NotImplementedError(error_msg) # Set the data directories if data_dir: self.data_dir", "@decorators.downloader def download_msas(self): \"\"\" Download data for Metropolitian Statistical Areas.", "\"\"\" import os import logging import pathlib from . import", "those then. elif isinstance(years, list): self.years_to_download = list(map(int, years)) #", "it as the first argument.\") self.source = source self.force =", "'em all. \"\"\" for geo in self.GEOTYPE_LIST: print(geo) # Get", "required. Pass it as the first argument.\") self.source = source", "all Census designated places. \"\"\" return geotypes.PlacesDownloader @decorators.downloader def download_urban_areas(self):", "\"nectas\", \"cnectas\", \"aiannh_homelands\", \"tracts\", \"zctas\", \"unified_school_districts\", \"elementary_school_districts\", \"secondary_school_districts\" ) def", "Census upper legislative districts in the provided state. \"\"\" return", "years == \"all\": self.years_to_download = self.YEAR_LIST # If the user", "Download data for all states. \"\"\" return geotypes.StatesDownloader @decorators.downloader def", "geotypes from . import decorators logger = logging.getLogger(__name__) class BaseTableConfig(object):", "self, api_key=None, source=\"acs5\", years=None, data_dir=None, force=False ): \"\"\" Configuration. \"\"\"", "regions. 
\"\"\" return geotypes.RegionsDownloader @decorators.downloader def download_divisions(self): \"\"\" Download data", "Areas \"\"\" return geotypes.ZctasDownloader @decorators.downloader def download_unified_school_districts(self): \"\"\" Download data", "return geotypes.RegionsDownloader @decorators.downloader def download_divisions(self): \"\"\" Download data for all", "divisions. \"\"\" return geotypes.DivisionsDownloader @decorators.downloader def download_states(self): \"\"\" Download data", "geotypes.PumasDownloader @decorators.downloader def download_nectas(self): \"\"\" Download data for New England", "American Indian home lands. \"\"\" return geotypes.AiannhHomelandsDownloader @decorators.downloader def download_tracts(self):", "Download data for all Census tracts in the provided state.", "( \"nationwide\", \"regions\", \"divisions\", \"states\", \"congressional_districts\", \"state_legislative_upper_districts\", \"state_legislative_lower_districts\", \"counties\", \"places\",", "\"\"\" Download data for all counties. \"\"\" return geotypes.CountiesDownloader @decorators.downloader", "exist if not self.data_dir.exists(): self.data_dir.mkdir() if not self.raw_data_dir.exists(): self.raw_data_dir.mkdir() if", "first argument.\") self.source = source self.force = force # #", "want all the years, give it to them. if years", "def download_aiannh_homelands(self): \"\"\" Download data for American Indian home lands.", "download_urban_areas(self): \"\"\" Download data for all urban areas \"\"\" return", "home lands. \"\"\" return geotypes.AiannhHomelandsDownloader @decorators.downloader def download_tracts(self): \"\"\" Download", "def download_csas(self): \"\"\" Download data for Combined Statistical Areas. \"\"\"", "] # All available geographies GEOTYPE_LIST = ( \"nationwide\", \"regions\",", "\"tracts\", \"zctas\", \"unified_school_districts\", \"elementary_school_districts\", \"secondary_school_districts\" ) def __init__( self, api_key=None,", "it if not dl or not callable(dl): raise NotImplementedError(f\"Invalid geography", "\"\"\" return geotypes.MsasDownloader @decorators.downloader def download_csas(self): \"\"\" Download data for", "download and process tables from the Census API. \"\"\" THIS_DIR", "available for the years\" f\"{self.YEAR_LIST[-1]}-{self.YEAR_LIST[0]}.\") raise NotImplementedError(error_msg) # Set the", "\"cnectas\", \"aiannh_homelands\", \"tracts\", \"zctas\", \"unified_school_districts\", \"elementary_school_districts\", \"secondary_school_districts\" ) def __init__(", "# Allow custom years for data download, defaulting to most", "for secondary school districts. \"\"\" return geotypes.SecondarySchoolDistrictsDownloader def download_everything(self): \"\"\"", "only available for the years\" f\"{self.YEAR_LIST[-1]}-{self.YEAR_LIST[0]}.\") raise NotImplementedError(error_msg) # Set", "geotypes.SecondarySchoolDistrictsDownloader def download_everything(self): \"\"\" Download 'em all. \"\"\" for geo", "state. \"\"\" return geotypes.StateLegislativeLowerDistrictsDownloader @decorators.downloader def download_counties(self): \"\"\" Download data", "@decorators.downloader def download_state_legislative_upper_districts(self): \"\"\" Download data for all Census upper", "= [ 2017, 2016, 2015, 2014, 2013, 2012, 2011, 2010,", "int): self.years_to_download = [years] # Or if they provide years", "for Metropolitian Statistical Areas. 
\"\"\" return geotypes.MsasDownloader @decorators.downloader def download_csas(self):", "\"aiannh_homelands\", \"tracts\", \"zctas\", \"unified_school_districts\", \"elementary_school_districts\", \"secondary_school_districts\" ) def __init__( self,", "argument.\") self.source = source self.force = force # # Allow", "secondary school districts. \"\"\" return geotypes.SecondarySchoolDistrictsDownloader def download_everything(self): \"\"\" Download", "Or if they provide years as a list, give those", "# Set the inputs self.CENSUS_API_KEY = os.getenv(\"CENSUS_API_KEY\", api_key) if not", "\"\"\" Download data for all regions. \"\"\" return geotypes.RegionsDownloader @decorators.downloader", "geotypes.StateLegislativeUpperDistrictsDownloader @decorators.downloader def download_state_legislative_lower_districts(self): \"\"\" Download data for all Census", "def download_nectas(self): \"\"\" Download data for New England cities and", "download_divisions(self): \"\"\" Download data for all divisions. \"\"\" return geotypes.DivisionsDownloader", "data_dir=None, force=False ): \"\"\" Configuration. \"\"\" # Set the inputs", "all Congressional districts. \"\"\" return geotypes.CongressionalDistrictsDownloader @decorators.downloader def download_state_legislative_upper_districts(self): \"\"\"", "@decorators.downloader def download_urban_areas(self): \"\"\" Download data for all urban areas", "f\"https://censusreporter.org/tables/{self.RAW_TABLE_NAME}/\" # # Geotype downloaders # @decorators.downloader def download_nationwide(self): \"\"\"", "urban areas \"\"\" return geotypes.UrbanAreasDownloader @decorators.downloader def download_msas(self): \"\"\" Download", "or not callable(dl): raise NotImplementedError(f\"Invalid geography type: {geo}\") # Run", "\"\"\" return geotypes.CongressionalDistrictsDownloader @decorators.downloader def download_state_legislative_upper_districts(self): \"\"\" Download data for", "\"\"\" Download data for secondary school districts. \"\"\" return geotypes.SecondarySchoolDistrictsDownloader", "NotImplementedError(f\"Invalid geography type: {geo}\") # Run it try: dl() except", "and process tables from the Census API. \"\"\" THIS_DIR =", "\"\"\" return geotypes.CsasDownloader @decorators.downloader def download_pumas(self): \"\"\" Download data for", "= [years] # Or if they provide years as a", "download_unified_school_districts(self): \"\"\" Download data for unified school districts. \"\"\" return", "# If the user provides a year give them that.", "return geotypes.StatesDownloader @decorators.downloader def download_congressional_districts(self): \"\"\" Download data for all", "data for American Indian home lands. \"\"\" return geotypes.AiannhHomelandsDownloader @decorators.downloader", "download_states(self): \"\"\" Download data for all states. \"\"\" return geotypes.StatesDownloader", "Configuration. \"\"\" # Set the inputs self.CENSUS_API_KEY = os.getenv(\"CENSUS_API_KEY\", api_key)", "in the provided state. \"\"\" return geotypes.TractsDownloader @decorators.downloader def download_zctas(self):", "@decorators.downloader def download_places(self): \"\"\" Download data for all Census designated", "give them that. elif isinstance(years, int): self.years_to_download = [years] #", "geotypes.CsasDownloader @decorators.downloader def download_pumas(self): \"\"\" Download data for Public Use", "districts. 
\"\"\" return geotypes.ElementarySchoolDistrictsDownloader @decorators.downloader def download_secondary_school_districts(self): \"\"\" Download data", "\"\"\" Returns the URL of the Census Reporter page explaining", "return geotypes.StateLegislativeUpperDistrictsDownloader @decorators.downloader def download_state_legislative_lower_districts(self): \"\"\" Download data for all", "geotypes.RegionsDownloader @decorators.downloader def download_divisions(self): \"\"\" Download data for all divisions.", "Download data for all Congressional districts. \"\"\" return geotypes.CongressionalDistrictsDownloader @decorators.downloader", "\"\"\" Download data for all Census designated places. \"\"\" return", "geotypes.UrbanAreasDownloader @decorators.downloader def download_msas(self): \"\"\" Download data for Metropolitian Statistical", "All available years YEAR_LIST = [ 2017, 2016, 2015, 2014,", "download_tracts(self): \"\"\" Download data for all Census tracts in the", "def download_elementary_school_districts(self): \"\"\" Download data for elementary school districts. \"\"\"", "data for Zip Code Tabulation Areas \"\"\" return geotypes.ZctasDownloader @decorators.downloader", "geotypes.PlacesDownloader @decorators.downloader def download_urban_areas(self): \"\"\" Download data for all urban", "\"\"\" return geotypes.StateLegislativeLowerDistrictsDownloader @decorators.downloader def download_counties(self): \"\"\" Download data for", "recent year # # If they want all the years,", ") def __init__( self, api_key=None, source=\"acs5\", years=None, data_dir=None, force=False ):", "nationwide data. \"\"\" return geotypes.NationwideDownloader @decorators.downloader def download_regions(self): \"\"\" Download", "default to the latest year of data elif years is", "for all states. \"\"\" return geotypes.StatesDownloader @decorators.downloader def download_congressional_districts(self): \"\"\"", "\"\"\" Download data for Zip Code Tabulation Areas \"\"\" return", "2014, 2013, 2012, 2011, 2010, 2009 ] # All available", "years YEAR_LIST = [ 2017, 2016, 2015, 2014, 2013, 2012,", "self.years_to_download: if year not in self.YEAR_LIST: error_msg = (\"Data only", "Download data for elementary school districts. \"\"\" return geotypes.ElementarySchoolDistrictsDownloader @decorators.downloader", "Returns the URL of the Census Reporter page explaining the", "def download_congressional_districts(self): \"\"\" Download data for all Congressional districts. \"\"\"", "def download_states(self): \"\"\" Download data for all states. \"\"\" return", "table. \"\"\" import os import logging import pathlib from .", "districts in the provided state. \"\"\" return geotypes.StateLegislativeUpperDistrictsDownloader @decorators.downloader def", "the downloader function dl = getattr(self, f\"download_{geo}\", None) # Validate", "callable(dl): raise NotImplementedError(f\"Invalid geography type: {geo}\") # Run it try:", "base class that governs how to download and process tables", "towns. \"\"\" return geotypes.NectasDownloader @decorators.downloader def download_cnectas(self): \"\"\" Download data", "state. \"\"\" return geotypes.TractsDownloader @decorators.downloader def download_zctas(self): \"\"\" Download data", "list, give those then. elif isinstance(years, list): self.years_to_download = list(map(int,", "\"states\", \"congressional_districts\", \"state_legislative_upper_districts\", \"state_legislative_lower_districts\", \"counties\", \"places\", \"urban_areas\", \"msas\", \"csas\", \"pumas\",", ". 
import decorators logger = logging.getLogger(__name__) class BaseTableConfig(object): \"\"\" Configures", "self.processed_data_dir.mkdir() @property def censusreporter_url(self): \"\"\" Returns the URL of the", "Combined Statistical Areas. \"\"\" return geotypes.CsasDownloader @decorators.downloader def download_pumas(self): \"\"\"", "\"\"\" Download data for all urban areas \"\"\" return geotypes.UrbanAreasDownloader", "\"\"\" Download data for all Census lower legislative districts in", "\"\"\" Download nationwide data. \"\"\" return geotypes.NationwideDownloader @decorators.downloader def download_regions(self):", "if not self.raw_data_dir.exists(): self.raw_data_dir.mkdir() if not self.processed_data_dir.exists(): self.processed_data_dir.mkdir() @property def", "-* \"\"\" A base class that governs how to download", "self.data_dir = pathlib.Path(str(data_dir)) else: self.data_dir = self.PARENT_DIR.joinpath(\"data\") self.raw_data_dir = self.data_dir.joinpath(\"raw\")", "# All available geographies GEOTYPE_LIST = ( \"nationwide\", \"regions\", \"divisions\",", "if they provide years as a list, give those then.", "them that. elif isinstance(years, int): self.years_to_download = [years] # Or", "all the years, give it to them. if years ==", "data directories if data_dir: self.data_dir = pathlib.Path(str(data_dir)) else: self.data_dir =", "years=None, data_dir=None, force=False ): \"\"\" Configuration. \"\"\" # Set the", "Geotype downloaders # @decorators.downloader def download_nationwide(self): \"\"\" Download nationwide data.", "return geotypes.NectasDownloader @decorators.downloader def download_cnectas(self): \"\"\" Download data for combined", "data for all Census tracts in the provided state. \"\"\"", "the data directories if data_dir: self.data_dir = pathlib.Path(str(data_dir)) else: self.data_dir", "geotypes.CnectasDownloader @decorators.downloader def download_aiannh_homelands(self): \"\"\" Download data for American Indian", "\"unified_school_districts\", \"elementary_school_districts\", \"secondary_school_districts\" ) def __init__( self, api_key=None, source=\"acs5\", years=None," ]
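# ---------------------------------------------------------------------------
# Usage sketch (illustrative only, not part of the module): BaseTableConfig
# is meant to be subclassed. The one attribute the base class itself reads
# is RAW_TABLE_NAME (via the censusreporter_url property); the subclass name
# and table id below are hypothetical.
#
#     class MedianAgeConfig(BaseTableConfig):
#         RAW_TABLE_NAME = "B01002"  # hypothetical ACS table id
#
#     config = MedianAgeConfig(api_key="YOUR_CENSUS_API_KEY", years=[2016, 2017])
#     print(config.censusreporter_url)  # https://censusreporter.org/tables/B01002/
#     config.download_tracts()      # one geotype at a time ...
#     config.download_everything()  # ... or all of GEOTYPE_LIST
# ---------------------------------------------------------------------------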
#!/usr/bin/env python3
import argparse
import os
from pathlib import Path
import shutil
import subprocess
import sys
from tempfile import TemporaryDirectory
from uuid import uuid4
from zipfile import ZipFile

import jinja2
import sente  # type: ignore

__version__ = (1, 0, 0)

SGF_RENDER_EXECUTABLE = './sgf-render'
TEMPLATEDIR = Path(__file__, '..', 'epub_template').resolve()


def load_sgf(sgfpath: Path):
    game = sente.sgf.load(str(sgfpath))
    comments = {}
    seq = game.get_default_sequence()
    for idx, move in enumerate(seq, 1):
        game.play(move)
        if game.comment:
            comments[idx] = game.comment
    return {
        # read only main sequence, not variations
        'nb_moves': len(seq),
        'metadata': game.get_properties(),
        'comments': comments,
    }


def main(sgfpath: Path, output_path: Path) -> None:
    print()
    print(f'Load content of {sgfpath}')
    try:
        sgf_content = load_sgf(sgfpath)
    except (sente.exceptions.InvalidSGFException, sente.exceptions.IllegalMoveException):
        print(f'Could not read {sgfpath}, skipping')
        return
    nb_moves = sgf_content['nb_moves']
    metadata = sgf_content['metadata']
    comments = sgf_content['comments']
    uuid = uuid4()

    with TemporaryDirectory() as tmpdir:
        print('Prepare structure of the ebook')
        shutil.copytree(TEMPLATEDIR, tmpdir, dirs_exist_ok=True)
        template = jinja2.Template(
            TEMPLATEDIR.joinpath('EPUB', 'Text', 'page_001.html').open().read())

        print('Prepare SVG diagrams')
        svgdirpath = Path(tmpdir, 'EPUB', 'Images')
        for move in range(1, nb_moves + 1):
            svgpath = f'diagram_{move:03}.svg'
            # generate SVG files with sgf-render
            try:
                subprocess.check_call([
                    SGF_RENDER_EXECUTABLE,
                    str(sgfpath),
                    '--move-numbers',
                    '--first-move-number', str(move),
                    '-n', str(move),
                    '--style', 'minimalist',
                    '-o', svgdirpath.joinpath(svgpath),
                ])
            except subprocess.CalledProcessError:
                print(f'Move {move} could not be converted to SVG')
                continue
            # replace move number in SVG
            # not possible directly in sgf-render invocation at the moment
            svg_content = svgdirpath.joinpath(svgpath).open().read()
            svgdirpath.joinpath(svgpath).open('w').write(
                svg_content.replace('>1<', f'>{move}<', 1))
            # create HTML page with SVG element
            html_content = template.render(
                title=sgfpath.stem,
                svgpath=svgpath,
                info=metadata,
                first_flag=(move == 1),
                last_flag=(move == nb_moves),
                comment=comments.get(move, ''),
            )
            with Path(tmpdir, 'EPUB', 'Text', f'page_{move:03}.html').open('w') as fd:
                fd.write(html_content)

        # Declare all HTML/SVG files in master file
        print('Prepare content.opf file')
        template = jinja2.Template(
            TEMPLATEDIR.joinpath('EPUB', 'content.opf').open().read())
        opf_content = template.render(
            title=sgfpath.stem,
            creator='sgf2ebook',
            UUID=uuid,
            svgpath=sorted(svgdirpath.glob('*.svg')),
            enumerate=enumerate,
        )
        with Path(tmpdir, 'EPUB', 'content.opf').open('w') as fd:
            fd.write(opf_content)

        # Generate table of contents
        print('Prepare table of contents')
        template = jinja2.Template(
            TEMPLATEDIR.joinpath('EPUB', 'toc.ncx').open().read())
        toc_content = template.render(
            title=sgfpath.stem,
            UUID=uuid,
            nb_moves=nb_moves,
            range=range,
        )
        with Path(tmpdir, 'EPUB', 'toc.ncx').open('w') as fd:
            fd.write(toc_content)

        # zip all content in EPUB file
        output_path.mkdir(exist_ok=True, parents=True)
        output_name = f"{metadata.get('EV', 'unknown_event')}{'_' if 'RO' in metadata else ''}{metadata.get('RO', '')}.epub".replace(' ', '_')
        with ZipFile(output_path.joinpath(output_name), 'w') as zf:
            os.chdir(tmpdir)
            # "The first file in the OCF ZIP Container MUST be the mimetype file"
            zf.write('mimetype')
            for root, dirs, files in os.walk('.'):
                for file in sorted(files):
                    if file != 'mimetype':
                        zf.write(Path(root, file))
        os.chdir(Path(__file__).parent)

    print(f'{output_path.joinpath(output_name)} generated')


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description='')
    parser.add_argument('--input-path', '-i', help='Input files or directory')
    parser.add_argument('--output-path', '-o', help='Output directory')
    args = parser.parse_args()
    path = Path(args.input_path)
    outpath = Path(args.output_path)
    if not path.exists():
        print(f'Input path {path} not found')
        sys.exit(1)
    if path.is_file():
        main(path, outpath)
    if path.is_dir():
        # Assumed completion (the original loop was truncated here):
        # convert every SGF file found in the directory.
        for filepath in sorted(path.glob('*.sgf')):
            main(filepath, outpath)
'page_001.html').open().read()) print('Prepare SVG diagrams') svgdirpath = Path(tmpdir,", "def main(sgfpath: Path, output_path: Path) -> None: print() print(f'Load content", "moment svg_content = svgdirpath.joinpath(svgpath).open().read() svgdirpath.joinpath(svgpath).open('w').write( svg_content.replace('>1<', f'>{move}<', 1)) # create", "output_path.mkdir(exist_ok=True, parents=True) output_name = f\"{metadata.get('EV', 'unknown_event')}{'_' if 'RO' in metadata", "read {sgfpath}, skipping') return nb_moves = sgf_content['nb_moves'] metadata = sgf_content['metadata']", "'mimetype': zf.write(Path(root, file)) os.chdir(Path(__file__).parent) print(f'{output_path.joinpath(output_name)} generated') if __name__ == \"__main__\":", "file output_path.mkdir(exist_ok=True, parents=True) output_name = f\"{metadata.get('EV', 'unknown_event')}{'_' if 'RO' in", "= argparse.ArgumentParser( description='') parser.add_argument('--input-path', '-i', help='Input files or directory') parser.add_argument('--output-path',", "template = jinja2.Template( TEMPLATEDIR.joinpath('EPUB', 'Text', 'page_001.html').open().read()) print('Prepare SVG diagrams') svgdirpath", "# Declare all HTML/SVG files in master file print('Prepare content.opf", "metadata = sgf_content['metadata'] comments = sgf_content['comments'] uuid = uuid4() with", "'content.opf').open('w') as fd: fd.write(opf_content) # Generate table of contents print('Prepare", "SVG files with sgf-render try: subprocess.check_call([ SGF_RENDER_EXECUTABLE, str(sgfpath), '--move-numbers', '--first-move-number',", "import argparse import os from pathlib import Path import shutil", "be the mimetype file\" zf.write('mimetype') for root, dirs, files in", "f'page_{move:03}.html').open('w') as fd: fd.write(html_content) # Declare all HTML/SVG files in", "'w') as zf: os.chdir(tmpdir) # \"The first file in the", "parser.parse_args() path = Path(args.input_path) outpath = Path(args.output_path) if not path.exists():", "type: ignore __version__ = (1, 0, 0) SGF_RENDER_EXECUTABLE = './sgf-render'", "sgf-render invocation at the moment svg_content = svgdirpath.joinpath(svgpath).open().read() svgdirpath.joinpath(svgpath).open('w').write( svg_content.replace('>1<',", "game = sente.sgf.load(str(sgfpath)) comments = {} seq = game.get_default_sequence() for", "= template.render( title=sgfpath.stem, creator='sgf2ebook', UUID=uuid, svgpath=sorted(svgdirpath.glob('*.svg')), enumerate=enumerate, ) with Path(tmpdir,", "last_flag=(move == nb_moves), comment=comments.get(move, ''), ) with Path(tmpdir, 'EPUB', 'Text',", "1), last_flag=(move == nb_moves), comment=comments.get(move, ''), ) with Path(tmpdir, 'EPUB',", "in enumerate(seq, 1): game.play(move) if game.comment: comments[idx] = game.comment return", "'')}.epub\".replace(' ', '_') with ZipFile(output_path.joinpath(output_name), 'w') as zf: os.chdir(tmpdir) #", "'metadata': game.get_properties(), 'comments': comments, } def main(sgfpath: Path, output_path: Path)", "print('Prepare SVG diagrams') svgdirpath = Path(tmpdir, 'EPUB', 'Images') for move", "import TemporaryDirectory from uuid import uuid4 from zipfile import ZipFile", "in the OCF ZIP Container MUST be the mimetype file\"", "1): svgpath = f'diagram_{move:03}.svg' # generate SVG files with sgf-render", "subprocess import sys from tempfile import TemporaryDirectory from uuid import", "'-n', str(move), '--style', 'minimalist', '-o', svgdirpath.joinpath(svgpath), ]) except subprocess.CalledProcessError: print(f'Move", "in os.walk('.'): for file in sorted(files): if file != 
'mimetype':", "game.get_default_sequence() for idx, move in enumerate(seq, 1): game.play(move) if game.comment:", "sorted(files): if file != 'mimetype': zf.write(Path(root, file)) os.chdir(Path(__file__).parent) print(f'{output_path.joinpath(output_name)} generated')", "with Path(tmpdir, 'EPUB', 'toc.ncx').open('w') as fd: fd.write(toc_content) # zip all", "== \"__main__\": parser = argparse.ArgumentParser( description='') parser.add_argument('--input-path', '-i', help='Input files", "= sgf_content['nb_moves'] metadata = sgf_content['metadata'] comments = sgf_content['comments'] uuid =", "dirs_exist_ok=True) template = jinja2.Template( TEMPLATEDIR.joinpath('EPUB', 'Text', 'page_001.html').open().read()) print('Prepare SVG diagrams')", "parser = argparse.ArgumentParser( description='') parser.add_argument('--input-path', '-i', help='Input files or directory')", "# create HTML page with SVG element html_content = template.render(", "path.is_file(): main(path, outpath) if path.is_dir(): for filepath in sorted(path.rglob('*.sgf')): main(filepath,", "in sgf-render invocation at the moment svg_content = svgdirpath.joinpath(svgpath).open().read() svgdirpath.joinpath(svgpath).open('w').write(", "uuid4 from zipfile import ZipFile import jinja2 import sente #", "not path.exists(): print(f'Input path {path} not found') sys.exit(1) if path.is_file():", "template.render( title=sgfpath.stem, UUID=uuid, nb_moves=nb_moves, range=range, ) with Path(tmpdir, 'EPUB', 'toc.ncx').open('w')", "continue # replace move number in SVG # not possible", "print('Prepare table of contents') template = jinja2.Template( TEMPLATEDIR.joinpath('EPUB', 'toc.ncx').open().read()) toc_content", "ZipFile(output_path.joinpath(output_name), 'w') as zf: os.chdir(tmpdir) # \"The first file in", "sgf_content = load_sgf(sgfpath) except (sente.exceptions.InvalidSGFException, sente.exceptions.IllegalMoveException): print(f'Could not read {sgfpath},", "\"__main__\": parser = argparse.ArgumentParser( description='') parser.add_argument('--input-path', '-i', help='Input files or", "structure of the ebook') shutil.copytree(TEMPLATEDIR, tmpdir, dirs_exist_ok=True) template = jinja2.Template(", "== nb_moves), comment=comments.get(move, ''), ) with Path(tmpdir, 'EPUB', 'Text', f'page_{move:03}.html').open('w')", "# generate SVG files with sgf-render try: subprocess.check_call([ SGF_RENDER_EXECUTABLE, str(sgfpath),", "game.play(move) if game.comment: comments[idx] = game.comment return { # read", "game.get_properties(), 'comments': comments, } def main(sgfpath: Path, output_path: Path) ->", "contents print('Prepare table of contents') template = jinja2.Template( TEMPLATEDIR.joinpath('EPUB', 'toc.ncx').open().read())", "UUID=uuid, svgpath=sorted(svgdirpath.glob('*.svg')), enumerate=enumerate, ) with Path(tmpdir, 'EPUB', 'content.opf').open('w') as fd:", ") with Path(tmpdir, 'EPUB', 'toc.ncx').open('w') as fd: fd.write(toc_content) # zip", "template = jinja2.Template( TEMPLATEDIR.joinpath('EPUB', 'content.opf').open().read()) opf_content = template.render( title=sgfpath.stem, creator='sgf2ebook',", "jinja2 import sente # type: ignore __version__ = (1, 0,", "toc_content = template.render( title=sgfpath.stem, UUID=uuid, nb_moves=nb_moves, range=range, ) with Path(tmpdir,", "= jinja2.Template( TEMPLATEDIR.joinpath('EPUB', 'content.opf').open().read()) opf_content = template.render( title=sgfpath.stem, creator='sgf2ebook', UUID=uuid,", "# \"The first file in the OCF ZIP Container MUST", "argparse import os from pathlib import Path import shutil 
import", "= {} seq = game.get_default_sequence() for idx, move in enumerate(seq,", "fd.write(html_content) # Declare all HTML/SVG files in master file print('Prepare", "seq = game.get_default_sequence() for idx, move in enumerate(seq, 1): game.play(move)", "sys.exit(1) if path.is_file(): main(path, outpath) if path.is_dir(): for filepath in", "as tmpdir: print('Prepare structure of the ebook') shutil.copytree(TEMPLATEDIR, tmpdir, dirs_exist_ok=True)", "'--move-numbers', '--first-move-number', str(move), '-n', str(move), '--style', 'minimalist', '-o', svgdirpath.joinpath(svgpath), ])", "print(f'Move {move} could not be converted to SVG') continue #", "EPUB file output_path.mkdir(exist_ok=True, parents=True) output_name = f\"{metadata.get('EV', 'unknown_event')}{'_' if 'RO'", "f\"{metadata.get('EV', 'unknown_event')}{'_' if 'RO' in metadata else ''}{metadata.get('RO', '')}.epub\".replace(' ',", "directory') parser.add_argument('--output-path', '-o', help='Output directory') args = parser.parse_args() path =", "= svgdirpath.joinpath(svgpath).open().read() svgdirpath.joinpath(svgpath).open('w').write( svg_content.replace('>1<', f'>{move}<', 1)) # create HTML page", "enumerate=enumerate, ) with Path(tmpdir, 'EPUB', 'content.opf').open('w') as fd: fd.write(opf_content) #", "tempfile import TemporaryDirectory from uuid import uuid4 from zipfile import", "# not possible directly in sgf-render invocation at the moment", "# replace move number in SVG # not possible directly", "''}{metadata.get('RO', '')}.epub\".replace(' ', '_') with ZipFile(output_path.joinpath(output_name), 'w') as zf: os.chdir(tmpdir)", "for root, dirs, files in os.walk('.'): for file in sorted(files):", "'..', 'epub_template').resolve() def load_sgf(sgfpath: Path): game = sente.sgf.load(str(sgfpath)) comments =", "not possible directly in sgf-render invocation at the moment svg_content", "svgdirpath.joinpath(svgpath).open().read() svgdirpath.joinpath(svgpath).open('w').write( svg_content.replace('>1<', f'>{move}<', 1)) # create HTML page with", "move in enumerate(seq, 1): game.play(move) if game.comment: comments[idx] = game.comment", "fd.write(opf_content) # Generate table of contents print('Prepare table of contents')", "SVG') continue # replace move number in SVG # not", "args = parser.parse_args() path = Path(args.input_path) outpath = Path(args.output_path) if", "TemporaryDirectory() as tmpdir: print('Prepare structure of the ebook') shutil.copytree(TEMPLATEDIR, tmpdir,", "if path.is_file(): main(path, outpath) if path.is_dir(): for filepath in sorted(path.rglob('*.sgf')):", "0) SGF_RENDER_EXECUTABLE = './sgf-render' TEMPLATEDIR = Path(__file__, '..', 'epub_template').resolve() def", "in master file print('Prepare content.opf file') template = jinja2.Template( TEMPLATEDIR.joinpath('EPUB',", "return nb_moves = sgf_content['nb_moves'] metadata = sgf_content['metadata'] comments = sgf_content['comments']", "shutil.copytree(TEMPLATEDIR, tmpdir, dirs_exist_ok=True) template = jinja2.Template( TEMPLATEDIR.joinpath('EPUB', 'Text', 'page_001.html').open().read()) print('Prepare", "'-o', help='Output directory') args = parser.parse_args() path = Path(args.input_path) outpath", "enumerate(seq, 1): game.play(move) if game.comment: comments[idx] = game.comment return {", "zipfile import ZipFile import jinja2 import sente # type: ignore", "skipping') return nb_moves = sgf_content['nb_moves'] metadata = sgf_content['metadata'] comments =", "content in EPUB file output_path.mkdir(exist_ok=True, parents=True) output_name = 
f\"{metadata.get('EV', 'unknown_event')}{'_'", "(1, 0, 0) SGF_RENDER_EXECUTABLE = './sgf-render' TEMPLATEDIR = Path(__file__, '..',", "print(f'Could not read {sgfpath}, skipping') return nb_moves = sgf_content['nb_moves'] metadata", "ZipFile import jinja2 import sente # type: ignore __version__ =", "with TemporaryDirectory() as tmpdir: print('Prepare structure of the ebook') shutil.copytree(TEMPLATEDIR,", "return { # read only main sequence, not variations 'nb_moves':", "{ # read only main sequence, not variations 'nb_moves': len(seq),", "file print('Prepare content.opf file') template = jinja2.Template( TEMPLATEDIR.joinpath('EPUB', 'content.opf').open().read()) opf_content", "invocation at the moment svg_content = svgdirpath.joinpath(svgpath).open().read() svgdirpath.joinpath(svgpath).open('w').write( svg_content.replace('>1<', f'>{move}<',", "ignore __version__ = (1, 0, 0) SGF_RENDER_EXECUTABLE = './sgf-render' TEMPLATEDIR", "fd: fd.write(opf_content) # Generate table of contents print('Prepare table of", "(sente.exceptions.InvalidSGFException, sente.exceptions.IllegalMoveException): print(f'Could not read {sgfpath}, skipping') return nb_moves =", "'EPUB', 'Text', f'page_{move:03}.html').open('w') as fd: fd.write(html_content) # Declare all HTML/SVG", "]) except subprocess.CalledProcessError: print(f'Move {move} could not be converted to", "idx, move in enumerate(seq, 1): game.play(move) if game.comment: comments[idx] =", "HTML/SVG files in master file print('Prepare content.opf file') template =", "= jinja2.Template( TEMPLATEDIR.joinpath('EPUB', 'toc.ncx').open().read()) toc_content = template.render( title=sgfpath.stem, UUID=uuid, nb_moves=nb_moves,", "at the moment svg_content = svgdirpath.joinpath(svgpath).open().read() svgdirpath.joinpath(svgpath).open('w').write( svg_content.replace('>1<', f'>{move}<', 1))", "Path(args.output_path) if not path.exists(): print(f'Input path {path} not found') sys.exit(1)", "uuid import uuid4 from zipfile import ZipFile import jinja2 import", "'Text', 'page_001.html').open().read()) print('Prepare SVG diagrams') svgdirpath = Path(tmpdir, 'EPUB', 'Images')", "files in master file print('Prepare content.opf file') template = jinja2.Template(", "!= 'mimetype': zf.write(Path(root, file)) os.chdir(Path(__file__).parent) print(f'{output_path.joinpath(output_name)} generated') if __name__ ==", "diagrams') svgdirpath = Path(tmpdir, 'EPUB', 'Images') for move in range(1,", "import uuid4 from zipfile import ZipFile import jinja2 import sente", "= sente.sgf.load(str(sgfpath)) comments = {} seq = game.get_default_sequence() for idx,", "= './sgf-render' TEMPLATEDIR = Path(__file__, '..', 'epub_template').resolve() def load_sgf(sgfpath: Path):", "HTML page with SVG element html_content = template.render( title=sgfpath.stem, svgpath=svgpath,", "def load_sgf(sgfpath: Path): game = sente.sgf.load(str(sgfpath)) comments = {} seq", "print(f'Load content of {sgfpath}') try: sgf_content = load_sgf(sgfpath) except (sente.exceptions.InvalidSGFException,", "'comments': comments, } def main(sgfpath: Path, output_path: Path) -> None:", "'Text', f'page_{move:03}.html').open('w') as fd: fd.write(html_content) # Declare all HTML/SVG files", "converted to SVG') continue # replace move number in SVG", "\"The first file in the OCF ZIP Container MUST be", "1): game.play(move) if game.comment: comments[idx] = game.comment return { #", "python3 import argparse import os from pathlib import Path import", "None: print() print(f'Load content of {sgfpath}') try: sgf_content = 
load_sgf(sgfpath)", "load_sgf(sgfpath) except (sente.exceptions.InvalidSGFException, sente.exceptions.IllegalMoveException): print(f'Could not read {sgfpath}, skipping') return", "except subprocess.CalledProcessError: print(f'Move {move} could not be converted to SVG')", "nb_moves + 1): svgpath = f'diagram_{move:03}.svg' # generate SVG files", "tmpdir, dirs_exist_ok=True) template = jinja2.Template( TEMPLATEDIR.joinpath('EPUB', 'Text', 'page_001.html').open().read()) print('Prepare SVG", "template.render( title=sgfpath.stem, svgpath=svgpath, info=metadata, first_flag=(move == 1), last_flag=(move == nb_moves),", "<gh_stars>0 #!/usr/bin/env python3 import argparse import os from pathlib import", "not found') sys.exit(1) if path.is_file(): main(path, outpath) if path.is_dir(): for", "= f'diagram_{move:03}.svg' # generate SVG files with sgf-render try: subprocess.check_call([", "parents=True) output_name = f\"{metadata.get('EV', 'unknown_event')}{'_' if 'RO' in metadata else", "in SVG # not possible directly in sgf-render invocation at", "all HTML/SVG files in master file print('Prepare content.opf file') template", "contents') template = jinja2.Template( TEMPLATEDIR.joinpath('EPUB', 'toc.ncx').open().read()) toc_content = template.render( title=sgfpath.stem,", "of the ebook') shutil.copytree(TEMPLATEDIR, tmpdir, dirs_exist_ok=True) template = jinja2.Template( TEMPLATEDIR.joinpath('EPUB',", "svg_content.replace('>1<', f'>{move}<', 1)) # create HTML page with SVG element", "tmpdir: print('Prepare structure of the ebook') shutil.copytree(TEMPLATEDIR, tmpdir, dirs_exist_ok=True) template", "__version__ = (1, 0, 0) SGF_RENDER_EXECUTABLE = './sgf-render' TEMPLATEDIR =", "in sorted(files): if file != 'mimetype': zf.write(Path(root, file)) os.chdir(Path(__file__).parent) print(f'{output_path.joinpath(output_name)}", "jinja2.Template( TEMPLATEDIR.joinpath('EPUB', 'content.opf').open().read()) opf_content = template.render( title=sgfpath.stem, creator='sgf2ebook', UUID=uuid, svgpath=sorted(svgdirpath.glob('*.svg')),", "files or directory') parser.add_argument('--output-path', '-o', help='Output directory') args = parser.parse_args()", "Generate table of contents print('Prepare table of contents') template =", "metadata else ''}{metadata.get('RO', '')}.epub\".replace(' ', '_') with ZipFile(output_path.joinpath(output_name), 'w') as", "template.render( title=sgfpath.stem, creator='sgf2ebook', UUID=uuid, svgpath=sorted(svgdirpath.glob('*.svg')), enumerate=enumerate, ) with Path(tmpdir, 'EPUB',", "uuid = uuid4() with TemporaryDirectory() as tmpdir: print('Prepare structure of", "as fd: fd.write(toc_content) # zip all content in EPUB file", "len(seq), 'metadata': game.get_properties(), 'comments': comments, } def main(sgfpath: Path, output_path:", "os from pathlib import Path import shutil import subprocess import", "move number in SVG # not possible directly in sgf-render", "as fd: fd.write(html_content) # Declare all HTML/SVG files in master", "os.walk('.'): for file in sorted(files): if file != 'mimetype': zf.write(Path(root,", "1)) # create HTML page with SVG element html_content =", "except (sente.exceptions.InvalidSGFException, sente.exceptions.IllegalMoveException): print(f'Could not read {sgfpath}, skipping') return nb_moves", "import subprocess import sys from tempfile import TemporaryDirectory from uuid", "__name__ == \"__main__\": parser = argparse.ArgumentParser( description='') parser.add_argument('--input-path', '-i', help='Input", "} def main(sgfpath: Path, output_path: Path) -> None: print() 
print(f'Load", "Path, output_path: Path) -> None: print() print(f'Load content of {sgfpath}')", "'EPUB', 'toc.ncx').open('w') as fd: fd.write(toc_content) # zip all content in", "if not path.exists(): print(f'Input path {path} not found') sys.exit(1) if", "sgf_content['comments'] uuid = uuid4() with TemporaryDirectory() as tmpdir: print('Prepare structure", "help='Output directory') args = parser.parse_args() path = Path(args.input_path) outpath =", "= parser.parse_args() path = Path(args.input_path) outpath = Path(args.output_path) if not", "of contents') template = jinja2.Template( TEMPLATEDIR.joinpath('EPUB', 'toc.ncx').open().read()) toc_content = template.render(", "parser.add_argument('--input-path', '-i', help='Input files or directory') parser.add_argument('--output-path', '-o', help='Output directory')", "the moment svg_content = svgdirpath.joinpath(svgpath).open().read() svgdirpath.joinpath(svgpath).open('w').write( svg_content.replace('>1<', f'>{move}<', 1)) #", "'-o', svgdirpath.joinpath(svgpath), ]) except subprocess.CalledProcessError: print(f'Move {move} could not be", "else ''}{metadata.get('RO', '')}.epub\".replace(' ', '_') with ZipFile(output_path.joinpath(output_name), 'w') as zf:", "from tempfile import TemporaryDirectory from uuid import uuid4 from zipfile", "not read {sgfpath}, skipping') return nb_moves = sgf_content['nb_moves'] metadata =", "zf: os.chdir(tmpdir) # \"The first file in the OCF ZIP", "sente.sgf.load(str(sgfpath)) comments = {} seq = game.get_default_sequence() for idx, move", "if game.comment: comments[idx] = game.comment return { # read only", "svgdirpath.joinpath(svgpath).open('w').write( svg_content.replace('>1<', f'>{move}<', 1)) # create HTML page with SVG", "of {sgfpath}') try: sgf_content = load_sgf(sgfpath) except (sente.exceptions.InvalidSGFException, sente.exceptions.IllegalMoveException): print(f'Could", "'unknown_event')}{'_' if 'RO' in metadata else ''}{metadata.get('RO', '')}.epub\".replace(' ', '_')", "not be converted to SVG') continue # replace move number", "for idx, move in enumerate(seq, 1): game.play(move) if game.comment: comments[idx]", "with sgf-render try: subprocess.check_call([ SGF_RENDER_EXECUTABLE, str(sgfpath), '--move-numbers', '--first-move-number', str(move), '-n',", "sente.exceptions.IllegalMoveException): print(f'Could not read {sgfpath}, skipping') return nb_moves = sgf_content['nb_moves']", "content of {sgfpath}') try: sgf_content = load_sgf(sgfpath) except (sente.exceptions.InvalidSGFException, sente.exceptions.IllegalMoveException):", "print('Prepare content.opf file') template = jinja2.Template( TEMPLATEDIR.joinpath('EPUB', 'content.opf').open().read()) opf_content =", "table of contents print('Prepare table of contents') template = jinja2.Template(", "in range(1, nb_moves + 1): svgpath = f'diagram_{move:03}.svg' # generate", "description='') parser.add_argument('--input-path', '-i', help='Input files or directory') parser.add_argument('--output-path', '-o', help='Output", "template = jinja2.Template( TEMPLATEDIR.joinpath('EPUB', 'toc.ncx').open().read()) toc_content = template.render( title=sgfpath.stem, UUID=uuid,", "main(path, outpath) if path.is_dir(): for filepath in sorted(path.rglob('*.sgf')): main(filepath, outpath.joinpath(filepath.parent.relative_to(path)))", "dirs, files in os.walk('.'): for file in sorted(files): if file", "table of contents') template = jinja2.Template( TEMPLATEDIR.joinpath('EPUB', 'toc.ncx').open().read()) toc_content =", "with Path(tmpdir, 'EPUB', 'content.opf').open('w') as fd: 
fd.write(opf_content) # Generate table", "= template.render( title=sgfpath.stem, svgpath=svgpath, info=metadata, first_flag=(move == 1), last_flag=(move ==", "Container MUST be the mimetype file\" zf.write('mimetype') for root, dirs,", "the ebook') shutil.copytree(TEMPLATEDIR, tmpdir, dirs_exist_ok=True) template = jinja2.Template( TEMPLATEDIR.joinpath('EPUB', 'Text',", "sys from tempfile import TemporaryDirectory from uuid import uuid4 from", "range=range, ) with Path(tmpdir, 'EPUB', 'toc.ncx').open('w') as fd: fd.write(toc_content) #", "TEMPLATEDIR.joinpath('EPUB', 'toc.ncx').open().read()) toc_content = template.render( title=sgfpath.stem, UUID=uuid, nb_moves=nb_moves, range=range, )", "from uuid import uuid4 from zipfile import ZipFile import jinja2", "generate SVG files with sgf-render try: subprocess.check_call([ SGF_RENDER_EXECUTABLE, str(sgfpath), '--move-numbers',", "file\" zf.write('mimetype') for root, dirs, files in os.walk('.'): for file", "Path(tmpdir, 'EPUB', 'toc.ncx').open('w') as fd: fd.write(toc_content) # zip all content", "in EPUB file output_path.mkdir(exist_ok=True, parents=True) output_name = f\"{metadata.get('EV', 'unknown_event')}{'_' if", "outpath = Path(args.output_path) if not path.exists(): print(f'Input path {path} not", "import shutil import subprocess import sys from tempfile import TemporaryDirectory", "game.comment: comments[idx] = game.comment return { # read only main", "f'>{move}<', 1)) # create HTML page with SVG element html_content", "first_flag=(move == 1), last_flag=(move == nb_moves), comment=comments.get(move, ''), ) with", "title=sgfpath.stem, UUID=uuid, nb_moves=nb_moves, range=range, ) with Path(tmpdir, 'EPUB', 'toc.ncx').open('w') as", "= load_sgf(sgfpath) except (sente.exceptions.InvalidSGFException, sente.exceptions.IllegalMoveException): print(f'Could not read {sgfpath}, skipping')", "number in SVG # not possible directly in sgf-render invocation", "= Path(tmpdir, 'EPUB', 'Images') for move in range(1, nb_moves +", "fd.write(toc_content) # zip all content in EPUB file output_path.mkdir(exist_ok=True, parents=True)", "variations 'nb_moves': len(seq), 'metadata': game.get_properties(), 'comments': comments, } def main(sgfpath:", "main sequence, not variations 'nb_moves': len(seq), 'metadata': game.get_properties(), 'comments': comments,", "file in sorted(files): if file != 'mimetype': zf.write(Path(root, file)) os.chdir(Path(__file__).parent)", "Path(tmpdir, 'EPUB', 'Images') for move in range(1, nb_moves + 1):", "TEMPLATEDIR.joinpath('EPUB', 'Text', 'page_001.html').open().read()) print('Prepare SVG diagrams') svgdirpath = Path(tmpdir, 'EPUB',", "import Path import shutil import subprocess import sys from tempfile", "= Path(args.output_path) if not path.exists(): print(f'Input path {path} not found')", "import jinja2 import sente # type: ignore __version__ = (1,", "0, 0) SGF_RENDER_EXECUTABLE = './sgf-render' TEMPLATEDIR = Path(__file__, '..', 'epub_template').resolve()", "with SVG element html_content = template.render( title=sgfpath.stem, svgpath=svgpath, info=metadata, first_flag=(move", "be converted to SVG') continue # replace move number in" ]
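The converter can also be driven programmatically instead of through its CLI. A minimal sketch, assuming the script above is importable as a module and that 'games/' and 'ebooks/' are hypothetical directories; the sgf-render binary must still be present at SGF_RENDER_EXECUTABLE for the SVG step to succeed:

# Batch-convert a folder of SGF records by calling main() directly.
# 'games/' and 'ebooks/' are hypothetical paths, not taken from the project.
from pathlib import Path

for sgf in sorted(Path('games').rglob('*.sgf')):
    # mirror the input tree under the output directory, as the CLI branch does
    main(sgf, Path('ebooks').joinpath(sgf.parent.relative_to('games')))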
[ "session=0, pop_bin=None, position=None): ''' Update the metric with a result", "self.train_actions = len(training_df.index) #group the data by the itemIds grp", "''' return (\"Popularity@\" + str( self.length ) + \": \"),", "must be sorted correctly. Parameters -------- result: pandas.Series Series of", "str( self.length ) + \": \"), ( self.sum / self.tests", "score self.pop_scores.sort_values(ascending=False, inplace=True) #normalize self.pop_scores = self.pop_scores / self.pop_scores[:1].values[0] def", ") ) self.tests += 1 def result(self): ''' Return a", "of scores with the item id as the index '''", "= training_df.groupby('ItemId') #count the occurence of every itemid in the", "inplace=True) #normalize self.pop_scores = self.pop_scores / self.pop_scores[:1].values[0] def add(self, result,", "self.pop_scores = grp.size() #sort it according to the score self.pop_scores.sort_values(ascending=False,", "distinct item_ids there are in the training data ''' def", "of those top scorers items = recs.index.unique() self.sum += (", "data ''' def __init__(self, length=20, training_df=None): self.length = length; self.sum", "the score self.pop_scores.sort_values(ascending=False, inplace=True) #normalize self.pop_scores = self.pop_scores / self.pop_scores[:1].values[0]", "many distinct item_ids there are in the training data '''", "#only keep the k- first predictions recs = result[:self.length] #take", "Parameters ----------- length : int Coverage@length training_df : dataframe determines", "the occurence of every itemid in the trainingdataset self.pop_scores =", "and the correct next item. Result must be sorted correctly.", ") Used to iteratively calculate the average overall popularity of", "for_item=0, session=0, pop_bin=None, position=None): ''' Update the metric with a", "= len(training_df.index) #group the data by the itemIds grp =", "len(training_df.index) #group the data by the itemIds grp = training_df.groupby('ItemId')", "scores with the item id as the index ''' #only", "calculate the average overall popularity of an algorithm's recommendations. 
Parameters", "#group the data by the itemIds grp = training_df.groupby('ItemId') #count", "averaged value ''' return (\"Popularity@\" + str( self.length ) +", "with the item id as the index ''' #only keep", "= 0 self.tests = 0 self.train_actions = len(training_df.index) #group the", "grp.size() #sort it according to the score self.pop_scores.sort_values(ascending=False, inplace=True) #normalize", "].sum() / len( items ) ) self.tests += 1 def", "self.pop_scores = self.pop_scores / self.pop_scores[:1].values[0] def add(self, result, next_items, for_item=0,", "pandas.Series Series of scores with the item id as the", "self.pop_scores[:1].values[0] def add(self, result, next_items, for_item=0, session=0, pop_bin=None, position=None): '''", "Popularity( length=20 ) Used to iteratively calculate the average overall", "the current averaged value ''' return (\"Popularity@\" + str( self.length", "id as the index ''' #only keep the k- first", "the index ''' #only keep the k- first predictions recs", "the itemIds grp = training_df.groupby('ItemId') #count the occurence of every", "of a description string and the current averaged value '''", "self.sum += ( self.pop_scores[ items ].sum() / len( items )", "self.pop_scores[ items ].sum() / len( items ) ) self.tests +=", "iteratively calculate the average overall popularity of an algorithm's recommendations.", "self.length = length; self.sum = 0 self.tests = 0 self.train_actions", "+= 1 def result(self): ''' Return a tuple of a", "with a result set and the correct next item. Result", "in the trainingdataset self.pop_scores = grp.size() #sort it according to", "determines how many distinct item_ids there are in the training", "the unique values out of those top scorers items =", "metric with a result set and the correct next item.", "length : int Coverage@length training_df : dataframe determines how many", "dataframe determines how many distinct item_ids there are in the", "itemid in the trainingdataset self.pop_scores = grp.size() #sort it according", "according to the score self.pop_scores.sort_values(ascending=False, inplace=True) #normalize self.pop_scores = self.pop_scores", "are in the training data ''' def __init__(self, length=20, training_df=None):", "def result(self): ''' Return a tuple of a description string", "pop_bin=None, position=None): ''' Update the metric with a result set", "to iteratively calculate the average overall popularity of an algorithm's", "a tuple of a description string and the current averaged", "scorers items = recs.index.unique() self.sum += ( self.pop_scores[ items ].sum()", "average overall popularity of an algorithm's recommendations. Parameters ----------- length", "result[:self.length] #take the unique values out of those top scorers", "def __init__(self, length=20, training_df=None): self.length = length; self.sum = 0", "items ) ) self.tests += 1 def result(self): ''' Return", "recommendations. Parameters ----------- length : int Coverage@length training_df : dataframe", "#take the unique values out of those top scorers items", ") self.tests += 1 def result(self): ''' Return a tuple", "Used to iteratively calculate the average overall popularity of an", "set and the correct next item. Result must be sorted", "first predictions recs = result[:self.length] #take the unique values out", "those top scorers items = recs.index.unique() self.sum += ( self.pop_scores[", "the data by the itemIds grp = training_df.groupby('ItemId') #count the", "index ''' #only keep the k- first predictions recs =", "be sorted correctly. 
Parameters -------- result: pandas.Series Series of scores", "length=20 ) Used to iteratively calculate the average overall popularity", "Popularity: ''' Popularity( length=20 ) Used to iteratively calculate the", "''' Popularity( length=20 ) Used to iteratively calculate the average", "= self.pop_scores / self.pop_scores[:1].values[0] def add(self, result, next_items, for_item=0, session=0,", "__init__(self, length=20, training_df=None): self.length = length; self.sum = 0 self.tests", "#normalize self.pop_scores = self.pop_scores / self.pop_scores[:1].values[0] def add(self, result, next_items,", "/ len( items ) ) self.tests += 1 def result(self):", "the trainingdataset self.pop_scores = grp.size() #sort it according to the", "current averaged value ''' return (\"Popularity@\" + str( self.length )", "recs.index.unique() self.sum += ( self.pop_scores[ items ].sum() / len( items", "top scorers items = recs.index.unique() self.sum += ( self.pop_scores[ items", "it according to the score self.pop_scores.sort_values(ascending=False, inplace=True) #normalize self.pop_scores =", "next_items, for_item=0, session=0, pop_bin=None, position=None): ''' Update the metric with", "next item. Result must be sorted correctly. Parameters -------- result:", "values out of those top scorers items = recs.index.unique() self.sum", "''' Update the metric with a result set and the", ": int Coverage@length training_df : dataframe determines how many distinct", "and the current averaged value ''' return (\"Popularity@\" + str(", "self.pop_scores.sort_values(ascending=False, inplace=True) #normalize self.pop_scores = self.pop_scores / self.pop_scores[:1].values[0] def add(self,", "length=20, training_df=None): self.length = length; self.sum = 0 self.tests =", "popularity of an algorithm's recommendations. Parameters ----------- length : int", "a description string and the current averaged value ''' return", "training data ''' def __init__(self, length=20, training_df=None): self.length = length;", "the average overall popularity of an algorithm's recommendations. Parameters -----------", "= 0 self.train_actions = len(training_df.index) #group the data by the", "''' def __init__(self, length=20, training_df=None): self.length = length; self.sum =", "add(self, result, next_items, for_item=0, session=0, pop_bin=None, position=None): ''' Update the", "''' #only keep the k- first predictions recs = result[:self.length]", "0 self.tests = 0 self.train_actions = len(training_df.index) #group the data", "result(self): ''' Return a tuple of a description string and", "recs = result[:self.length] #take the unique values out of those", "string and the current averaged value ''' return (\"Popularity@\" +", "self.length ) + \": \"), ( self.sum / self.tests )", "Update the metric with a result set and the correct", "Result must be sorted correctly. 
Parameters -------- result: pandas.Series Series", "item_ids there are in the training data ''' def __init__(self,", "----------- length : int Coverage@length training_df : dataframe determines how", "self.tests += 1 def result(self): ''' Return a tuple of", "the training data ''' def __init__(self, length=20, training_df=None): self.length =", "( self.pop_scores[ items ].sum() / len( items ) ) self.tests", "in the training data ''' def __init__(self, length=20, training_df=None): self.length", "training_df=None): self.length = length; self.sum = 0 self.tests = 0", "= length; self.sum = 0 self.tests = 0 self.train_actions =", "items ].sum() / len( items ) ) self.tests += 1", "grp = training_df.groupby('ItemId') #count the occurence of every itemid in", "Coverage@length training_df : dataframe determines how many distinct item_ids there", "keep the k- first predictions recs = result[:self.length] #take the", "/ self.pop_scores[:1].values[0] def add(self, result, next_items, for_item=0, session=0, pop_bin=None, position=None):", "Return a tuple of a description string and the current", "value ''' return (\"Popularity@\" + str( self.length ) + \":", "result, next_items, for_item=0, session=0, pop_bin=None, position=None): ''' Update the metric", "Parameters -------- result: pandas.Series Series of scores with the item", "+= ( self.pop_scores[ items ].sum() / len( items ) )", "return (\"Popularity@\" + str( self.length ) + \": \"), (", "#count the occurence of every itemid in the trainingdataset self.pop_scores", "the correct next item. Result must be sorted correctly. Parameters", "predictions recs = result[:self.length] #take the unique values out of", "''' Return a tuple of a description string and the", "out of those top scorers items = recs.index.unique() self.sum +=", "int Coverage@length training_df : dataframe determines how many distinct item_ids", "itemIds grp = training_df.groupby('ItemId') #count the occurence of every itemid", "+ str( self.length ) + \": \"), ( self.sum /", "length; self.sum = 0 self.tests = 0 self.train_actions = len(training_df.index)", "training_df.groupby('ItemId') #count the occurence of every itemid in the trainingdataset", "class Popularity: ''' Popularity( length=20 ) Used to iteratively calculate", "overall popularity of an algorithm's recommendations. Parameters ----------- length :", "of an algorithm's recommendations. Parameters ----------- length : int Coverage@length", "the metric with a result set and the correct next", "result set and the correct next item. Result must be", "result: pandas.Series Series of scores with the item id as", "description string and the current averaged value ''' return (\"Popularity@\"", "algorithm's recommendations. Parameters ----------- length : int Coverage@length training_df :", "an algorithm's recommendations. 
Parameters ----------- length : int Coverage@length training_df", "data by the itemIds grp = training_df.groupby('ItemId') #count the occurence", "0 self.train_actions = len(training_df.index) #group the data by the itemIds", "trainingdataset self.pop_scores = grp.size() #sort it according to the score", "#sort it according to the score self.pop_scores.sort_values(ascending=False, inplace=True) #normalize self.pop_scores", "unique values out of those top scorers items = recs.index.unique()", "of every itemid in the trainingdataset self.pop_scores = grp.size() #sort", "def add(self, result, next_items, for_item=0, session=0, pop_bin=None, position=None): ''' Update", "how many distinct item_ids there are in the training data", "correctly. Parameters -------- result: pandas.Series Series of scores with the", "sorted correctly. Parameters -------- result: pandas.Series Series of scores with", "position=None): ''' Update the metric with a result set and", "len( items ) ) self.tests += 1 def result(self): '''", "to the score self.pop_scores.sort_values(ascending=False, inplace=True) #normalize self.pop_scores = self.pop_scores /", "the item id as the index ''' #only keep the", "self.pop_scores / self.pop_scores[:1].values[0] def add(self, result, next_items, for_item=0, session=0, pop_bin=None,", "= recs.index.unique() self.sum += ( self.pop_scores[ items ].sum() / len(", "item. Result must be sorted correctly. Parameters -------- result: pandas.Series", "the k- first predictions recs = result[:self.length] #take the unique", "correct next item. Result must be sorted correctly. Parameters --------", "-------- result: pandas.Series Series of scores with the item id", "Series of scores with the item id as the index", "item id as the index ''' #only keep the k-", "items = recs.index.unique() self.sum += ( self.pop_scores[ items ].sum() /", "self.sum = 0 self.tests = 0 self.train_actions = len(training_df.index) #group", "1 def result(self): ''' Return a tuple of a description", "k- first predictions recs = result[:self.length] #take the unique values", "self.tests = 0 self.train_actions = len(training_df.index) #group the data by", "by the itemIds grp = training_df.groupby('ItemId') #count the occurence of", "(\"Popularity@\" + str( self.length ) + \": \"), ( self.sum", "there are in the training data ''' def __init__(self, length=20,", "occurence of every itemid in the trainingdataset self.pop_scores = grp.size()", "as the index ''' #only keep the k- first predictions", "every itemid in the trainingdataset self.pop_scores = grp.size() #sort it", "a result set and the correct next item. Result must", "tuple of a description string and the current averaged value", "= result[:self.length] #take the unique values out of those top", "training_df : dataframe determines how many distinct item_ids there are", "= grp.size() #sort it according to the score self.pop_scores.sort_values(ascending=False, inplace=True)", ": dataframe determines how many distinct item_ids there are in" ]
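A quick usage sketch under the class's own conventions (an 'ItemId' column in the training frame, and a recommendation Series sorted by score descending); the toy ids and scores are made up:

import pandas as pd

# item 1 occurs 3x, item 2 occurs 2x, item 3 occurs 1x in training
training = pd.DataFrame({'ItemId': [1, 1, 1, 2, 2, 3]})
metric = Popularity(length=2, training_df=training)

# scores for items 1 and 3, already sorted descending as add() requires
recommendations = pd.Series([0.9, 0.4], index=[1, 3])
metric.add(recommendations, next_items=[2])

label, value = metric.result()
# normalized popularity: item 1 -> 1.0, item 3 -> 1/3, so the mean is 2/3
print(label, value)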
[ "from . import views urlpatterns = [ url(r'^settings$', views.household_dashboard, name='household_dashboard'),", "[ url(r'^settings$', views.household_dashboard, name='household_dashboard'), url(r'^myinfo$', views.my_info, name='my_info'), url(r'^profile$', views.household_profile, name='maintain_household'),", "views.ajax_models_by_make), url(r'^ajax/makes-by-type/(?P<type_id>\\d+)/$', views.ajax_makes_by_type), url(r'^ajax/add-make/(?P<type_key>\\d+)/(?P<make>[\\w ]{1,50})/$', views.ajax_add_make), url(r'^ajax/add-model/(?P<make_key>\\d+)/(?P<model>[\\w -]{1,128})/$', views.ajax_add_model), url(r'^ajax/delete-invite/$',", "url(r'^members$', views.household_members, name='maintain_members'), url(r'^vehicles$', views.household_vehicles, name='maintain_vehicles'), url(r'^ajax/models-by-make/(?P<make_id>\\d+)/$', views.ajax_models_by_make), url(r'^ajax/makes-by-type/(?P<type_id>\\d+)/$', views.ajax_makes_by_type),", "name='my_info'), url(r'^profile$', views.household_profile, name='maintain_household'), url(r'^members$', views.household_members, name='maintain_members'), url(r'^vehicles$', views.household_vehicles, name='maintain_vehicles'),", "import views urlpatterns = [ url(r'^settings$', views.household_dashboard, name='household_dashboard'), url(r'^myinfo$', views.my_info,", "url(r'^myinfo$', views.my_info, name='my_info'), url(r'^profile$', views.household_profile, name='maintain_household'), url(r'^members$', views.household_members, name='maintain_members'), url(r'^vehicles$',", "url(r'^ajax/makes-by-type/(?P<type_id>\\d+)/$', views.ajax_makes_by_type), url(r'^ajax/add-make/(?P<type_key>\\d+)/(?P<make>[\\w ]{1,50})/$', views.ajax_add_make), url(r'^ajax/add-model/(?P<make_key>\\d+)/(?P<model>[\\w -]{1,128})/$', views.ajax_add_model), url(r'^ajax/delete-invite/$', views.ajax_delete_invite),", "]{1,50})/$', views.ajax_add_make), url(r'^ajax/add-model/(?P<make_key>\\d+)/(?P<model>[\\w -]{1,128})/$', views.ajax_add_model), url(r'^ajax/delete-invite/$', views.ajax_delete_invite), url(r'^ajax/change-member-status/$', views.ajax_change_member_status), ]", ". import views urlpatterns = [ url(r'^settings$', views.household_dashboard, name='household_dashboard'), url(r'^myinfo$',", "url(r'^vehicles$', views.household_vehicles, name='maintain_vehicles'), url(r'^ajax/models-by-make/(?P<make_id>\\d+)/$', views.ajax_models_by_make), url(r'^ajax/makes-by-type/(?P<type_id>\\d+)/$', views.ajax_makes_by_type), url(r'^ajax/add-make/(?P<type_key>\\d+)/(?P<make>[\\w ]{1,50})/$', views.ajax_add_make),", "import include, url from . import views urlpatterns = [", "include, url from . 
import views urlpatterns = [ url(r'^settings$',", "url(r'^ajax/add-make/(?P<type_key>\\d+)/(?P<make>[\\w ]{1,50})/$', views.ajax_add_make), url(r'^ajax/add-model/(?P<make_key>\\d+)/(?P<model>[\\w -]{1,128})/$', views.ajax_add_model), url(r'^ajax/delete-invite/$', views.ajax_delete_invite), url(r'^ajax/change-member-status/$', views.ajax_change_member_status),", "views.my_info, name='my_info'), url(r'^profile$', views.household_profile, name='maintain_household'), url(r'^members$', views.household_members, name='maintain_members'), url(r'^vehicles$', views.household_vehicles,", "name='household_dashboard'), url(r'^myinfo$', views.my_info, name='my_info'), url(r'^profile$', views.household_profile, name='maintain_household'), url(r'^members$', views.household_members, name='maintain_members'),", "name='maintain_members'), url(r'^vehicles$', views.household_vehicles, name='maintain_vehicles'), url(r'^ajax/models-by-make/(?P<make_id>\\d+)/$', views.ajax_models_by_make), url(r'^ajax/makes-by-type/(?P<type_id>\\d+)/$', views.ajax_makes_by_type), url(r'^ajax/add-make/(?P<type_key>\\d+)/(?P<make>[\\w ]{1,50})/$',", "url(r'^profile$', views.household_profile, name='maintain_household'), url(r'^members$', views.household_members, name='maintain_members'), url(r'^vehicles$', views.household_vehicles, name='maintain_vehicles'), url(r'^ajax/models-by-make/(?P<make_id>\\d+)/$',", "name='maintain_household'), url(r'^members$', views.household_members, name='maintain_members'), url(r'^vehicles$', views.household_vehicles, name='maintain_vehicles'), url(r'^ajax/models-by-make/(?P<make_id>\\d+)/$', views.ajax_models_by_make), url(r'^ajax/makes-by-type/(?P<type_id>\\d+)/$',", "url(r'^ajax/models-by-make/(?P<make_id>\\d+)/$', views.ajax_models_by_make), url(r'^ajax/makes-by-type/(?P<type_id>\\d+)/$', views.ajax_makes_by_type), url(r'^ajax/add-make/(?P<type_key>\\d+)/(?P<make>[\\w ]{1,50})/$', views.ajax_add_make), url(r'^ajax/add-model/(?P<make_key>\\d+)/(?P<model>[\\w -]{1,128})/$', views.ajax_add_model),", "url(r'^settings$', views.household_dashboard, name='household_dashboard'), url(r'^myinfo$', views.my_info, name='my_info'), url(r'^profile$', views.household_profile, name='maintain_household'), url(r'^members$',", "= [ url(r'^settings$', views.household_dashboard, name='household_dashboard'), url(r'^myinfo$', views.my_info, name='my_info'), url(r'^profile$', views.household_profile,", "views.household_dashboard, name='household_dashboard'), url(r'^myinfo$', views.my_info, name='my_info'), url(r'^profile$', views.household_profile, name='maintain_household'), url(r'^members$', views.household_members,", "views.household_profile, name='maintain_household'), url(r'^members$', views.household_members, name='maintain_members'), url(r'^vehicles$', views.household_vehicles, name='maintain_vehicles'), url(r'^ajax/models-by-make/(?P<make_id>\\d+)/$', views.ajax_models_by_make),", "url from . 
import views urlpatterns = [ url(r'^settings$', views.household_dashboard,", "name='maintain_vehicles'), url(r'^ajax/models-by-make/(?P<make_id>\\d+)/$', views.ajax_models_by_make), url(r'^ajax/makes-by-type/(?P<type_id>\\d+)/$', views.ajax_makes_by_type), url(r'^ajax/add-make/(?P<type_key>\\d+)/(?P<make>[\\w ]{1,50})/$', views.ajax_add_make), url(r'^ajax/add-model/(?P<make_key>\\d+)/(?P<model>[\\w -]{1,128})/$',", "views.household_members, name='maintain_members'), url(r'^vehicles$', views.household_vehicles, name='maintain_vehicles'), url(r'^ajax/models-by-make/(?P<make_id>\\d+)/$', views.ajax_models_by_make), url(r'^ajax/makes-by-type/(?P<type_id>\\d+)/$', views.ajax_makes_by_type), url(r'^ajax/add-make/(?P<type_key>\\d+)/(?P<make>[\\w", "urlpatterns = [ url(r'^settings$', views.household_dashboard, name='household_dashboard'), url(r'^myinfo$', views.my_info, name='my_info'), url(r'^profile$',", "views.ajax_makes_by_type), url(r'^ajax/add-make/(?P<type_key>\\d+)/(?P<make>[\\w ]{1,50})/$', views.ajax_add_make), url(r'^ajax/add-model/(?P<make_key>\\d+)/(?P<model>[\\w -]{1,128})/$', views.ajax_add_model), url(r'^ajax/delete-invite/$', views.ajax_delete_invite), url(r'^ajax/change-member-status/$',", "django.conf.urls import include, url from . import views urlpatterns =", "views urlpatterns = [ url(r'^settings$', views.household_dashboard, name='household_dashboard'), url(r'^myinfo$', views.my_info, name='my_info'),", "from django.conf.urls import include, url from . import views urlpatterns", "views.household_vehicles, name='maintain_vehicles'), url(r'^ajax/models-by-make/(?P<make_id>\\d+)/$', views.ajax_models_by_make), url(r'^ajax/makes-by-type/(?P<type_id>\\d+)/$', views.ajax_makes_by_type), url(r'^ajax/add-make/(?P<type_key>\\d+)/(?P<make>[\\w ]{1,50})/$', views.ajax_add_make), url(r'^ajax/add-model/(?P<make_key>\\d+)/(?P<model>[\\w" ]
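Each ajax/* route captures its arguments from the URL pattern and hands them to the view as strings. A minimal sketch of what one matching view could look like; VehicleModel and the JSON payload shape are illustrative assumptions, not taken from this project:

# Hypothetical implementation of views.ajax_models_by_make.
from django.http import JsonResponse

from .models import VehicleModel  # hypothetical model


def ajax_models_by_make(request, make_id):
    # make_id arrives as the string captured by (?P<make_id>\d+)
    models = VehicleModel.objects.filter(make_id=int(make_id)).values('id', 'name')
    return JsonResponse({'models': list(models)})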
[ "True, # module_type = 10 # )), (\"asset\", Storage( name_nice", "#3: T(\"Reject\"), #4: T(\"Surplus\") } # ----------------------------------------------------------------------------- # Organisations #", "\"eden_nyc\" # Uncomment to show created_by/modified_by using Names not Emails", "Uncomment to disable responsive behavior of datatables # - Disabled", "#\"middle_name\", \"last_name\", (T(\"Job Title\"), \"human_resource.job_title_id\"), (T(\"Office\"), \"human_resource.site_id\"), ] # Don't", "# Don't include Email/Phone for unauthenticated users if current.auth.is_logged_in(): MOBILE", "used in list view so HTML is OK return A(s3_fullname(chair),", "= crud_form, filter_widgets = filter_widgets, list_fields = list_fields, ) settings.customise_org_organisation_resource", "(\"inv\", Storage( name_nice = T(\"Inventory\"), #description = \"Receiving and Sending", "[], # \"vol_volunteer.active\" : [], # \"vol_volunteer_cluster.vol_cluster_type_id\" : [], #", "= r.get_vars # Context from a Profile page?\" organisation_id =", "organisation_id field.readable = field.writable = False hr_fields.remove(\"organisation_id\") site_id = get_vars.get(\"(site)\",", "post Titles in Newsfeed settings.cms.show_titles = True # ----------------------------------------------------------------------------- #", "= s3db.req_req_item items = db(ritable.req_id == req_id).select(ritable.item_id, ritable.item_pack_id, ritable.quantity) item_represent", "== old_rss).select(table.channel_id, table.enabled, limitby = (0, 1) ).first() if old", "dict(field = \"name\", options=\"iCal\" ) ), S3SQLInlineComponent( \"document\", name =", "False settings.pr.show_emergency_contacts = False # ----------------------------------------------------------------------------- # Persons def customise_pr_person_controller(**attr):", "T(\"Photo\"), # multiple = False, # fields = [(\"\", \"image\")],", "automatically #settings.auth.registration_roles = { 0: [\"comms_dispatch\"]} #settings.auth.registration_link_user_to = {\"staff\":T(\"Staff\"), #", "have Filter form in Newsfeed be open by default settings.cms.filter_open", "result = standard_prep(r) else: result = True s3db = current.s3db", "Hide the language toolbar settings.L10n.display_toolbar = False # Default timezone", "list_fields += [(MOBILE, \"phone.value\"), (EMAIL, \"email.value\"), ] s3_sql_custom_fields.insert(3, S3SQLInlineComponent( \"contact\",", "], label = T(\"Search\"), comment = T(\"You can search by", "from gluon import Field table.chairperson = Field.Method(\"chairperson\", chairperson) # Format", "(such as org/group) to breakout from tabs attr[\"native\"] = True", "= \"phone\", label = T(\"Phone\"), multiple = False, fields =", "name_nice = T(\"Supply Chain Management\"), #description = \"Used within Inventory", "s3db.msg_channel_enable(\"msg_rss_channel\", channel_id) # Setup Parser table = s3db.msg_parser _id =", "{ 1: T(\"Other Warehouse\") } settings.inv.send_types = { #21: T(\"Distribution\")", "----------------------------------------------------------------------------- def customise_hrm_human_resource_controller(**attr): s3 = current.response.s3 # Custom prep standard_prep", ")), (\"cms\", Storage( name_nice = T(\"Content Management\"), #description = \"Content", "= True, module_type = 10, )), # Vehicle depends on", "hierarchical org_service: #S3SQLInlineLink( \"service\", label = T(\"Services\"), field = \"service_id\",", "= \"hierarchy\", ), S3SQLInlineComponentMultiSelectWidget( # activate hierarchical org_service: #S3SQLInlineLink( \"service\",", 
"True, #hidden = True, ), ] list_fields = [\"id\", \"name\",", "create r.component.table.site_id.default = None return result s3.prep = custom_prep #", "True, access = \"|1|\", # Only Administrators can see this", "= current.s3db s3db.org_group_team.org_group_id.represent = S3Represent(lookup=\"org_group\", show_link=True) crud_form = S3SQLCustomForm(\"name\", \"description\",", "\"pr_person_details.father_name\" : [], # \"pr_person_details.company\" : [], # \"pr_person_details.affiliations\" :", "we already have a channel for this URL url_exists =", "\"person_id\" # Uncomment to use Rich Text editor in Newsfeed", ").first().location_id # Create Post ptable = s3db.cms_post _id = ptable.insert(series_id=series_id,", "# This item is handled separately for the menu #", "# Coming from req/create form # Hide most Fields from", "for NYC Prepared \"\"\" # Pre-Populate settings.base.prepopulate = (\"NYC\",) settings.base.system_name", "is possible to all still) module_type = 10 )), (\"org\",", "= crud_form, list_fields = list_fields, ) elif r.component_name == \"group_membership\":", "relief agencies to coordinate their activities', restricted = True, module_type", "Management\", # restricted = True, # module_type = 2, #", "= 10, # )), #(\"member\", Storage( # name_nice = T(\"Members\"),", "verify their email address? settings.auth.registration_requires_verification = True # Do new", "(T(\"Facilities\"), \"facility\"), (T(\"Projects\"), \"project\"), (T(\"Assets\"), \"asset\"), ] output[\"rheader\"] = s3db.org_rheader(r,", "settings.hrm.use_credentials = False # Uncomment to enable the use of", "name = \"media\", label = T(\"URLs (media, fundraising, website, social", "name_nice = T(\"Building Assessments\"), # #description = \"Building Safety Assessments\",", "= form_vars.get(\"address\", None) if address: form_vars.name = address else: #", "\"Edit\" # Uncomment to disable checking that LatLons are within", "# The user-visible functionality of this module isn't normally required.", "use an Autocomplete for Site lookup fields settings.org.site_autocomplete = True", "#filter = True, #header = \"\", hidden = True, ),", "\"name\", options=\"iCal\" ) ), S3SQLInlineComponent( \"document\", name = \"data\", label", "if we already have a channel for this Contact db", "use organisation_id instead of created_by in Newsfeed settings.cms.organisation = \"post_organisation.organisation_id\"", "= \"Central point to record details on People\", restricted =", "Default rss_import = None else: # Create form: Default rss_import", "parsers) s3db.msg_channel_enable(\"msg_rss_channel\", name_exists.channel_id) return # Check if we already have", "settings.ui.update_label = \"Edit\" # Uncomment to disable checking that LatLons", "try: marker = db(mtable.name == marker).select(mtable.image, mtable.height, mtable.width, cache=s3db.cache, limitby=(0,", "= True, # module_type = 2, # )), (\"cms\", Storage(", "location_id = db(otable.site_id == row.site_id).select(otable.location_id, limitby=(0, 1) ).first().location_id # Create", "item_represent(item.item_id)) body = \"%s\\n%s\" % (item, body) else: # Skills", "(\"US\",) settings.fin.currencies = { \"USD\" : T(\"United States Dollars\"), }", "the location, capacity and breakdown of victims in Shelters\", #", "settings.hrm.organisation_label = \"Organization\" # ----------------------------------------------------------------------------- def customise_hrm_human_resource_controller(**attr): s3 = current.response.s3", "runs before prep \"\"\" s3db = current.s3db from s3 
import", "Vehicle depends on Assets #(\"vehicle\", Storage( # name_nice = T(\"Vehicles\"),", "label = T(\"Location\"), fields = [(\"\", \"location_id\")], ), # Partner", "== r.record.pe_id) & \\ (ctable.contact_method == \"RSS\") & \\ (ctable.deleted", "output): # Call standard postp if callable(standard_postp): output = standard_postp(r,", "# Restrict the Location Selector to just certain countries #", "OrderedDict([ (\"en\", \"English\"), (\"es\", \"Español\"), ]) # Authentication settings #", "Email/Phone for unauthenticated users if current.auth.is_logged_in(): MOBILE = settings.get_ui_label_mobile_phone() EMAIL", "\"group_person.group_id\"), (T(\"Groups\"), \"person_id$group_membership.group_id\"), \"site_id\", #\"site_contact\", (T(\"Email\"), \"email.value\"), (settings.get_ui_label_mobile_phone(), \"phone.value\"), ]", "settings.ui.formstyle_row = \"bootstrap\" settings.ui.formstyle = \"bootstrap\" settings.ui.filter_formstyle = \"table_inline\" settings.msg.parser", "#\"objectives\", \"human_resource_id\", # Activities S3SQLInlineComponent( \"location\", label = T(\"Location\"), fields", "else: return current.messages[\"NONE\"] # ----------------------------------------------------------------------------- def customise_pr_group_controller(**attr): s3 = current.response.s3", "Requests\", # restricted = True, # module_type = None #", "show_link=True) crud_form = S3SQLCustomForm(\"name\", \"description\", S3SQLInlineComponent(\"group_team\", label = T(\"Network\"), fields", "S3LocationFilter filter_widgets = [ S3TextFilter([\"person_id$first_name\", \"person_id$middle_name\", \"person_id$last_name\", ], label =", "restricted = True, # module_type = 10 # )), #", "Storage( name_nice = T(\"Admin\"), #description = \"Site Administration\", restricted =", "% dict(priority=priority, date=date) else: title = priority body = row.comments", "# Enable channel_id = record[\"channel_id\"] s3db.msg_channel_enable(\"msg_rss_channel\", channel_id) # Setup Parser", "T(\"Sale\"), #3: T(\"Reject\"), #4: T(\"Surplus\") } # ----------------------------------------------------------------------------- # Organisations", "comment = T(\"You can search by by group name, description", "\".\" # Thousands separator for numbers (defaults to space) settings.L10n.thousands_separator", "work (form fails to submit) #settings.pr.select_existing = False settings.pr.show_emergency_contacts =", "#description = \"Manage requests for supplies, assets, staff or other", "table = s3db[tablename] if not r.component and r.method in (None,", "url=url.split('?') var q=S3.queryString.parse(url[1]) q['(site)']=$(this).val() url=url[0]+'?'+S3.queryString.stringify(q) $('#person_add').attr('href',url)})''' current.response.s3.jquery_ready.append(script) settings.customise_req_req_resource = customise_req_req_resource", "field = r.table.site_id # Don't assume that user is from", "limitby=(0, 1) ).first() # Build Title & Body from the", "r.component_name == \"human_resource\": # Don't assume that user is from", "unauthenticated users if current.auth.is_logged_in(): MOBILE = settings.get_ui_label_mobile_phone() EMAIL = T(\"Email\")", "3: # High marker = \"%s_red\" % marker elif reqs", "field.readable = field.writable = False hr_fields.remove(\"site_id\") else: s3db.hrm_human_resource.site_id.default = None", "= False # ----------------------------------------------------------------------------- # Persons def customise_pr_person_controller(**attr): s3 =", "S3LocationFilter(\"organisation_location.location_id\", label = 
T(\"Neighborhood\"), levels = (\"L3\", \"L4\"), #hidden =", "elif r.component_name == \"organisation\": # Add Network Status to List", "shouldn't be disabled (\"default\", Storage( name_nice = T(\"Home\"), restricted =", "\"Español\"), ]) # Authentication settings # These settings should be", "return attr settings.customise_hrm_human_resource_controller = customise_hrm_human_resource_controller # ----------------------------------------------------------------------------- def customise_hrm_human_resource_resource(r, tablename):", "T(\"URLs (media, fundraising, website, social media, etc.\"), fields = [\"document_id\",", "r.get_vars # Context from a Profile page?\" organisation_id = get_vars.get(\"(organisation)\",", "Storage( name_nice = T(\"Messaging\"), #description = \"Sends & Receives Alerts", "_id = ptable.insert(series_id=series_id, title=title, body=body, location_id=location_id, person_id=row.requester_id, ) record =", "approved by an administrator prior to being able to login?", "None mtable = s3db.org_group_membership mtable.group_id.widget = S3MultiSelectWidget(multiple=False) mtable.status_id.widget = S3MultiSelectWidget(multiple=False,", "into location_id$addr_street fields = [(\"\", \"comments\")], ), S3SQLInlineComponentMultiSelectWidget( \"location\", label", "standard_prep(r) if not result: return False if r.method not in", "\"human_resource.site_id\"), ] # Don't include Email/Phone for unauthenticated users if", "= T(\"Chairperson\") return True s3.prep = custom_prep return attr settings.customise_pr_group_controller", "& \\ (ctable.deleted == False) rss = current.db(query).select(ctable.poll, limitby=(0, 1)", "is handled separately for the menu # )), (\"gis\", Storage(", "= True, ), S3LocationFilter(\"organisation_location.location_id\", label = T(\"Neighborhood\"), levels = (\"L3\",", "= s3.prep def custom_prep(r): # Call standard prep if callable(standard_prep):", "= [#(T(\"Network\"), \"group_team.org_group_id\"), \"name\", \"description\", \"meetings\", (T(\"Chairperson\"), \"chairperson\"), \"comments\", ]", "for telephone numbers settings.L10n.default_country_code = 1 # Enable this to", "to the Street Address \"\"\" form_vars = form.vars name =", "Site lookup fields settings.org.site_autocomplete = True # Extra fields to", ") s3db.pr_group_membership.group_head.label = T(\"Group Chairperson\") if r.component_name == \"group_membership\": from", "current.messages[\"NONE\"] db = current.db mtable = current.s3db.pr_group_membership ptable = db.pr_person", "), S3OptionsFilter(\"group_person.group_id\", label = T(\"Network\"), #filter = True, #header =", "Label for Requester settings.req.requester_label = \"Site Contact\" # Filter Requester", "list_fields = [#(T(\"Network\"), \"group_team.org_group_id\"), \"name\", \"description\", \"meetings\", (T(\"Chairperson\"), \"chairperson\"), \"comments\",", "2.7 from collections import OrderedDict except: # Python 2.6 from", "the name to the Street Address \"\"\" form_vars = form.vars", "a new tab settings.ui.iframe_opens_full = True settings.ui.label_attachments = \"Media\" settings.ui.update_label", "body) # Lookup series_id stable = s3db.cms_series try: series_id =", "gtable = db.gis_location query = (gtable.name == \"New York\") &", "settings for NYC Prepared \"\"\" # Pre-Populate settings.base.prepopulate = (\"NYC\",)", "people can help\") script = '''$('#project_project_code').attr('maxlength','100')''' s3.jquery_ready.append(script) crud_form = S3SQLCustomForm(", "current.db gtable = db.gis_location query = (gtable.name 
== \"New York\")", "\"\"\" # Pre-Populate settings.base.prepopulate = (\"NYC\",) settings.base.system_name = T(\"NYC Prepared\")", "show_address=True, show_postcode=True, ) elif r.component_name == \"human_resource\": # Don't assume", "disabled (\"default\", Storage( name_nice = T(\"Home\"), restricted = False, #", "r.method in (\"create\", \"update\"): field.label = \"\" # Gets replaced", "None # ImageCrop widget doesn't currently work within an Inline", "db(table.name == name).select(table.id, table.channel_id, table.enabled, table.url, limitby = (0, 1)", "= \"eden_nyc\" # Uncomment to show created_by/modified_by using Names not", "Comment/uncomment modules here to disable/enable them settings.modules = OrderedDict([ #", "= standard_prep(r) else: result = True if not r.component and", "tabs attr[\"native\"] = True return attr settings.customise_org_group_controller = customise_org_group_controller #", "in projects settings.project.sectors = False # Multiple partner organizations settings.project.multiple_organisations", "= dict(field = \"contact_method\", options = \"FACEBOOK\" ) ), \"meetings\",", "# GeoNames username settings.gis.geonames_username = \"eden_nyc\" # Uncomment to show", "Network Status to List Fields list_fields = s3db.get_config(\"org_organisation\", \"list_fields\") list_fields.insert(1,", "= True, ), ] list_fields = [\"id\", \"name\", \"code\", \"organisation_id\",", "date_required: date = rtable.date_required.represent(date_required) title = \"%(priority)s by %(date)s\" %", "= [(\"\", \"url\")], filterby = dict(field = \"name\", options=\"iCal\" )", "so rename old one name_exists.update_record(name=\"%s (Old)\" % name) if name_exists.enabled:", "the language toolbar settings.L10n.display_toolbar = False # Default timezone for", "\"bootstrap\" settings.ui.formstyle = \"bootstrap\" settings.ui.filter_formstyle = \"table_inline\" settings.msg.parser = \"NYC\"", "= False #settings.req.use_commit = False settings.req.requester_optional = True settings.req.date_writable =", "\"level\", options = \"L4\" ), # @ToDo: GroupedCheckbox Widget or", "T(\"Network\"), link = False, fields = [(\"\", \"group_id\")], multiple =", "s3 import IS_LOCATION_SELECTOR2, S3LocationSelectorWidget2, S3MultiSelectWidget field = table.location_id if r.method", "= \"contact_method\", options = \"TWITTER\" ) ), S3SQLInlineComponent( \"contact\", name", "Restrict the Location Selector to just certain countries # NB", "T(\"Network\"), #hidden = True, ), ] # Need to re-do", "= \"Create, enter, and manage surveys.\", restricted = True, module_type", "Port these Assessments to the Survey module #(\"building\", Storage( #", "\"iCAL\", multiple = False, fields = [(\"\", \"url\")], filterby =", "= dict(field = \"role\", options = \"2\" ) ), S3SQLInlineComponent(", "S3OptionsFilter(\"training.course_id\", label = T(\"Training\"), hidden = True, ), S3OptionsFilter(\"group_membership.group_id\", label", "import OrderedDict except: # Python 2.6 from gluon.contrib.simplejson.ordered_dict import OrderedDict", "# Decimal separator for numbers (defaults to ,) settings.L10n.decimal_separator =", "found: cannot set rss_import correctly\" % r.component_id) # Default rss_import", "to use an Autocomplete for Site lookup fields settings.org.site_autocomplete =", "in project/task. 
settings.project.milestones = False # Uncomment this to disable", "table = s3db.org_group list_fields = [\"name\", \"mission\", \"website\", \"meetings\", ]", "= s3db.pr_image.image # image_field.requires = None if r.interactive or r.representation", "item_represent = s3db.supply_item_represent pack_represent = s3db.supply_item_pack_represent for item in items:", "fields = [\"organisation_id\", \"comments\", # NB This is labelled 'Role'", "Enable this to have Open links in IFrames open a", "use for Facilities Map @ToDo: Legend \"\"\" db = current.db", "% marker mtable = db.gis_marker try: marker = db(mtable.name ==", "= True, ), # activate hierarchical org_service: #S3HierarchyFilter(\"service_organisation.service_id\", # #label", "if \"rheader\" in output: # Custom Tabs tabs = [(T(\"Basic", "r.interactive: tablename = \"org_facility\" table = s3db[tablename] if not r.component", "Storage( name_nice = T(\"Ticket Viewer\"), #description = \"Needed for Breadcrumbs\",", "= False, ), \"meetings\", \"comments\", ) filter_widgets = [ S3TextFilter([\"name\",", "\"description\", \"organisation.name\", \"organisation.acronym\", ], label = T(\"Name\"), _class = \"filter-search\",", "# Not displayed )), (\"inv\", Storage( name_nice = T(\"Inventory\"), #description", "restricted = True, module_type = 3, )), #(\"vol\", Storage( #", "of a group \"\"\" if hasattr(row, \"pr_group\"): row = row.pr_group", "ACLs # Enable this to have Open links in IFrames", "skill in skills: item = \"%s %s\" % (skill.quantity, skill_represent(skill.skill_id))", "module_type = 10 )), (\"org\", Storage( name_nice = T(\"Locations\"), #description", "output[\"item\"].add_class(\"pr_person\") return output s3.postp = custom_postp return attr settings.customise_pr_person_controller =", "List Fields list_fields = s3db.get_config(\"org_organisation\", \"list_fields\") list_fields.insert(1, \"group_membership.status_id\") return result", "r.component_name == \"facility\": if r.method in (None, \"create\", \"update\"): from", "= form_vars.name table = s3db.msg_rss_channel name_exists = db(table.name == name).select(table.id,", "customise_hrm_human_resource_controller(**attr): s3 = current.response.s3 # Custom prep standard_prep = s3.prep", "\"Create, enter, and manage surveys.\", restricted = True, module_type =", "hierarchical org_service: #S3HierarchyFilter(\"service_organisation.service_id\", # #label = T(\"Service\"), # #hidden =", "deleted, so we should disable it old_rss = data[0][\"value\"][\"value\"] table", "Items ritable = s3db.req_req_item items = db(ritable.req_id == req_id).select(ritable.item_id, ritable.item_pack_id,", "default form_vars.name = current.db.org_facility.location_id.represent(form_vars.location_id) # ----------------------------------------------------------------------------- def customise_org_facility_controller(**attr): s3db =", "= None # Location is that of the site otable", "= [(\"\", \"image\")], # filterby = dict(field = \"profile\", #", "for this Contact db = current.db name = form_vars.name table", "1) ).first() except: marker = db(mtable.name == \"office\").select(mtable.image, mtable.height, mtable.width,", "= True # Uncomment to show Links in Newsfeed settings.cms.show_links", "# Update form ctable = s3db.pr_contact query = (ctable.pe_id ==", "result = standard_prep(r) else: result = True if not r.component", "customise_project_project_controller # ----------------------------------------------------------------------------- # Requests Management settings.req.req_type = [\"People\", 
\"Stock\"]#,", "#cols = 5, ), \"phone\", S3SQLInlineComponent( \"contact\", name = \"phone2\",", "= current.s3db #if r.method == \"validate\": # # Can't validate", "# module_type = None # This item is handled separately", "= [\"first_name\", #\"middle_name\", \"last_name\", S3SQLInlineComponent( \"human_resource\", name = \"human_resource\", label", "S3SQLInlineLink( \"organisation_type\", field = \"organisation_type_id\", label = T(\"Type\"), multiple =", "s3db.req_req # Read the full record row = db(rtable.id ==", "to be registered without an email address settings.hrm.email_required = False", "# Skills body = \"%s\\n%s\" % (row.purpose, body) rstable =", "Assigning Assets\", restricted = True, module_type = 10, )), #", "this to use Activities for projects settings.project.activities = True #", "is unique so rename old one name_exists.update_record(name=\"%s (Old)\" % name)", "#hidden = True, ), ] list_fields = [\"name\", (T(\"Type\"), \"organisation_organisation_type.organisation_type_id\"),", "#widget = \"hierarchy\", ), S3SQLInlineComponentMultiSelectWidget( # activate hierarchical org_service: #S3SQLInlineLink(", "\"group_id\"), (\"\", \"status_id\"), ], ), S3SQLInlineComponent( \"address\", label = T(\"Address\"),", "result = True if not r.component: table = s3db.org_group list_fields", "hide_lx=False, reverse_lx=True, show_address=True, show_postcode=True, ) elif r.component_name == \"human_resource\": #", "correctly\" % r.component_id) # Default rss_import = None else: ctable", "dict(field = \"name\", options=\"Data\" ) ), S3SQLInlineComponent( \"contact\", name =", "details on People\", restricted = True, access = \"|1|\", #", "if not rss_url: if form.record: # Update form old_rss =", "= org_facility_onvalidation, ) return True s3.prep = custom_prep return attr", "activities', restricted = True, module_type = 4 )), # All", "= None, # Not displayed )), (\"inv\", Storage( name_nice =", ") filter_widgets = [ S3TextFilter([\"name\", \"description\", \"comments\", \"group_team.org_group_id$name\", ], label", "= None, )), (\"supply\", Storage( name_nice = T(\"Supply Chain Management\"),", "of Goods & Services\", # restricted = True, # module_type", "\"form\" in output: output[\"form\"].add_class(\"pr_person\") elif \"item\" in output and hasattr(output[\"item\"],", "Task management settings.project.mode_task = False # Uncomment this to use", "\"\"\" s3db = current.s3db table = s3db.pr_group field = table.group_type", "Import Feed\"), \"poll\"), ], filterby = dict(field = \"contact_method\", options", "in types: marker = \"asset\" elif \"Residential Building\" in types:", "s3db.supply_item_pack_represent for item in items: item = \"%s %s %s\"", "for filter_widgets & imports s3db.add_components(\"pr_group\", org_group_team = \"group_id\", ) s3db.configure(\"pr_group\",", "Either Contact has changed Name or this feed is associated", "None if r.interactive or r.representation == \"aadata\": if not r.component:", "before prep \"\"\" s3db = current.s3db from s3 import S3SQLCustomForm,", ").first() no_import = current.request.post_vars.get(\"rss_no_import\", None) if name_exists: if name_exists.url ==", "(including Admin) #settings.org.dependent_fields = { \\ # \"pr_person_details.mother_name\" : [],", "= \"\", hidden = True, ), ] s3db = current.s3db", "= T(\"URLs (media, fundraising, website, social media, etc.\"), fields =", "} # Uncomment to use an Autocomplete for Site lookup", "var url=$('#person_add').attr('href') url=url.split('?') var q=S3.queryString.parse(url[1]) 
# -----------------------------------------------------------------------------
def org_facility_onvalidation(form):
    """
        Default the name to the Street Address
    """

    form_vars = form.vars
    name = form_vars.get("name", None)
    if name:
        return
    address = form_vars.get("address", None)
    if address:
        form_vars.name = address
    else:
        # We need a default
        form_vars.name = current.db.org_facility.location_id.represent(form_vars.location_id)

# -----------------------------------------------------------------------------
def customise_org_facility_controller(**attr):

    s3db = current.s3db
    s3 = current.response.s3

    # Tell the client to request per-feature markers
    s3db.configure("org_facility", marker_fn=facility_marker_fn)

    # Custom prep
    standard_prep = s3.prep
    def custom_prep(r):
        # Call standard prep
        if callable(standard_prep):
            result = standard_prep(r)
            if not result:
                return False

        if r.method not in ("read", "update"):
            types = r.get_vars.get("site_facility_type.facility_type_id__belongs", None)
            if not types:
                # Hide Private Residences from the list
                from s3 import FS
                s3.filter = FS("site_facility_type.facility_type_id$name") != "Private Residence"

        if r.interactive:
            tablename = "org_facility"
            table = s3db[tablename]

            if not r.component and r.method in (None, "create", "update"):
                from s3 import IS_LOCATION_SELECTOR2, S3LocationSelectorWidget2, S3MultiSelectWidget
                field = table.location_id
                if r.method in ("create", "update"):
                    field.label = "" # Gets replaced by widget
                levels = ("L2", "L3", "L4")
                field.requires = IS_LOCATION_SELECTOR2(levels=levels)
                field.widget = S3LocationSelectorWidget2(levels=levels,
                                                         hide_lx=False,
                                                         reverse_lx=True,
                                                         show_address=True,
                                                         show_postcode=True,
                                                         )
                table.organisation_id.widget = S3MultiSelectWidget(multiple=False)

            if r.get_vars.get("format", None) == "popup":
                # Coming from req/create form
                # Hide most Fields
                from s3 import S3SQLCustomForm, S3SQLInlineComponent
                # Name will be defaulted in this onvalidation
                table.name.notnull = False
                table.name.requires = None
                crud_form = S3SQLCustomForm(S3SQLInlineComponent(
                                                "site_facility_type",
                                                label = T("Facility Type"),
                                                fields = [("", "facility_type_id")],
                                                multiple = False,
                                                required = True,
                                            ),
                                            "name",
                                            "location_id",
                                            )
                s3db.configure(tablename,
                               crud_form = crud_form,
                               onvalidation = org_facility_onvalidation,
                               )
        return True
    s3.prep = custom_prep

    return attr

settings.customise_org_facility_controller = customise_org_facility_controller
# -----------------------------------------------------------------------------
# Networks (org_group)
def customise_org_group_controller(**attr):

    s3db = current.s3db
    s3 = current.response.s3

    # Custom prep
    standard_prep = s3.prep
    def custom_prep(r):
        # Call standard prep
        if callable(standard_prep):
            result = standard_prep(r)
        else:
            result = True

        if not r.component:
            list_fields = ["name",
                           "mission",
                           "website",
                           "meetings",
                           ]
            s3db.configure("org_group",
                           list_fields = list_fields,
                           )
            if r.interactive:
                from gluon.html import DIV, INPUT
                from s3 import S3SQLCustomForm, S3SQLInlineComponent
                if r.method != "read":
                    if r.id:
                        # Update form: check whether the feed is imported
                        ctable = s3db.pr_contact
                        query = (ctable.pe_id == r.record.pe_id) & \
                                (ctable.contact_method == "RSS") & \
                                (ctable.deleted == False)
                        rss = current.db(query).select(ctable.poll,
                                                       limitby=(0, 1)
                                                       ).first()
                        if rss and not rss.poll:
                            # Remember that we don't wish to import
                            rss_import = "on"
                        else:
                            # Default
                            rss_import = None
                    else:
                        # Create form: Default
                        rss_import = None

                    crud_form = S3SQLCustomForm(
                        "name",
                        "location_id",
                        "mission",
                        S3SQLInlineComponent(
                            "contact",
                            name = "email",
                            label = T("Email"),
                            multiple = False,
                            fields = [("", "value")],
                            filterby = dict(field = "contact_method",
                                            options = "EMAIL")),
                        "website",
                        S3SQLInlineComponent(
                            "contact",
                            comment = DIV(INPUT(_type="checkbox",
                                                _name="rss_no_import",
                                                value=rss_import),
                                          T("Don't Import Feed")),
                            name = "rss",
                            label = T("RSS"),
                            multiple = False,
                            fields = [("", "value"),
                                      #(T("Don't Import Feed"), "poll"),
                                      ],
                            filterby = dict(field = "contact_method",
                                            options = "RSS")),
                        S3SQLInlineComponent(
                            "document",
                            name = "iCal",
                            label = "iCAL",
                            multiple = False,
                            fields = [("", "url")],
                            filterby = dict(field = "name",
                                            options="iCal")),
                        S3SQLInlineComponent(
                            "document",
                            name = "data",
                            label = T("Data"),
                            multiple = False,
                            fields = [("", "url")],
                            filterby = dict(field = "name",
                                            options="Data")),
                        S3SQLInlineComponent(
                            "contact",
                            name = "twitter",
                            label = T("Twitter"),
                            multiple = False,
                            fields = [("", "value")],
                            filterby = dict(field = "contact_method",
                                            options = "TWITTER")),
                        S3SQLInlineComponent(
                            "contact",
                            name = "facebook",
                            label = T("Facebook"),
                            multiple = False,
                            fields = [("", "value")],
                            filterby = dict(field = "contact_method",
                                            options = "FACEBOOK")),
                        "meetings",
                        "comments",
                        postprocess = pr_contact_postprocess,
                        )
                    s3db.configure("org_group",
                                   crud_form = crud_form,
                                   )
        elif r.component_name == "organisation":
            # Add Network Status to List Fields
            list_fields = s3db.get_config("org_organisation", "list_fields")
            list_fields.insert(1, "group_membership.status_id")

        return result
    s3.prep = custom_prep

    # Components with native controllers are opened via their own tabs
    attr["native"] = True

    return attr

settings.customise_org_group_controller = customise_org_group_controller
current.db(table.url ==", "\"Site Administration\", restricted = True, access = \"|1|\", # Only", "# Types common to both Send and Receive settings.inv.shipment_types =", "S3OptionsFilter(\"group_membership.group_id\", label = T(\"Network\"), represent = \"%(name)s\", #hidden = True,", "(T(\"Locations\"), \"location.location_id\"), ] s3db.configure(tablename, crud_form = crud_form, filter_widgets = filter_widgets,", "yet work (form fails to submit) #settings.pr.select_existing = False settings.pr.show_emergency_contacts", "T(\"Assessments\"), #description = \"Rapid Assessments & Flexible Impact Assessments\", restricted", "True, # module_type = 10, # )), (\"req\", Storage( name_nice", "body = row.comments if row.type == 1: # Items ritable", "= \"person_id\" # Uncomment to use Rich Text editor in", "menu )), (\"admin\", Storage( name_nice = T(\"Admin\"), #description = \"Site", "], label = T(\"Name\"), _class = \"filter-search\", ), S3OptionsFilter(\"status_id\", label", "# Items ritable = s3db.req_req_item items = db(ritable.req_id == req_id).select(ritable.item_id,", "\"Site Administration\", restricted = True, module_type = None # No", "mtable.width, cache=s3db.cache, limitby=(0, 1) ).first() return marker # ----------------------------------------------------------------------------- def", "form old_rss = form.record.sub_rsscontact import json data = old_rss =", "hrm_group_controller() list_fields = [(T(\"Network\"), \"group_team.org_group_id\"), \"name\", \"description\", \"meetings\", (T(\"Chairperson\"), \"chairperson\"),", "return result s3.prep = custom_prep return attr settings.customise_hrm_job_title_controller = customise_hrm_job_title_controller", "that LatLons are within boundaries of their parent #settings.gis.check_within_parent_boundaries =", "= False # Types common to both Send and Receive", "= True settings.req.date_writable = False settings.req.item_quantities_writable = True settings.req.skill_quantities_writable =", "this onvalidation table.name.notnull = False table.name.requires = None crud_form =", "all still) module_type = 10 )), (\"org\", Storage( name_nice =", "# Uncomment to use an Autocomplete for Site lookup fields", "tablename = \"org_facility\" table = s3db[tablename] if not r.component and", "try: pe_id = org.pe_id except: current.log.error(\"Org %s not found: cannot", "name_nice = T(\"Home\"), restricted = False, # Use ACLs to", "settings.security.audit_write = audit_write # ----------------------------------------------------------------------------- # CMS # Uncomment to", "# Uncomment this to request the Mobile Phone when a", "s3db = current.s3db s3db.org_group_team.org_group_id.represent = S3Represent(lookup=\"org_group\", show_link=True) crud_form = S3SQLCustomForm(\"name\",", "not as appropriate # Name field is unique so rename", "default menu & access the controller module_type = None #", "% marker elif reqs == 1: # Low marker =", "# Thousands separator for numbers (defaults to space) settings.L10n.thousands_separator =", "to do :) return else: # Enable channel (& associated", "to use org_group_id in Newsfeed settings.cms.organisation_group = \"post_organisation_group.group_id\" # Uncomment", "label = T(\"Network\"), #filter = True, #header = \"\", hidden", "= False # Multiple partner organizations settings.project.multiple_organisations = True def", "Pre-Populate settings.base.prepopulate = (\"NYC\",) settings.base.system_name = T(\"NYC Prepared\") settings.base.system_name_short =", "registers settings.auth.registration_requests_organisation = True 
# Uncomment this to request the", "and ensure active or not as appropriate # Name field", "Activities for projects settings.project.activities = True # Uncomment this to", "size of widget from s3 import s3_comments_widget table.description.widget = s3_comments_widget", "# PDF to Letter settings.base.paper_size = T(\"Letter\") # Restrict the", "T(\"Service\"), #hidden = True, ), # activate hierarchical org_service: #S3HierarchyFilter(\"service_organisation.service_id\",", "Survey module #(\"building\", Storage( # name_nice = T(\"Building Assessments\"), #", "marker = \"office\" if settings.has_module(\"req\"): # Colour code by open/priority", "\"\"\" if hasattr(row, \"pr_group\"): row = row.pr_group try: group_id =", "[\"People\", \"Stock\"]#, \"Summary\"] settings.req.prompt_match = False #settings.req.use_commit = False settings.req.requester_optional", "box for now # Ultimately should go into location_id$addr_street fields", "address? settings.auth.registration_requires_verification = True # Do new users need to", "\"\"\" Customise pr_group resource (in group & org_group controllers) -", "1: T(\"Other Warehouse\") } settings.inv.send_types = { #21: T(\"Distribution\") }", "\"location_id\", ) s3db.configure(tablename, crud_form = crud_form, onvalidation = org_facility_onvalidation, )", "crud_form = crud_form, list_fields = list_fields, ) settings.customise_hrm_human_resource_resource = customise_hrm_human_resource_resource", "resource (in group & org_group controllers) - runs after controller", "name_exists.update_record(name=\"%s (Old)\" % name) if name_exists.enabled: # Disable channel (&", "# and ensure active or not as appropriate # Name", "certain countries # NB This can also be over-ridden for", "use Rich Text editor in Newsfeed settings.cms.richtext = True #", "for Organisations in HR module #settings.hrm.organisation_label = \"National Society /", "f=\"facility\", vars = dict(child=\"site_id\"), title=T(\"Create Facility\"), tooltip=current.messages.AUTOCOMPLETE_HELP) current.response.s3.req_req_postprocess = req_req_postprocess", "\"email.value\"), ] s3_sql_custom_fields.insert(3, S3SQLInlineComponent( \"contact\", name = \"phone\", label =", "and not rss.poll: # Remember that we don't wish to", "Volunteers to be registered without an email address settings.hrm.email_required =", "Phone' settings.ui.label_mobile_phone = \"Cell Phone\" # Enable this to change", "for unauthenticated users if current.auth.is_logged_in(): MOBILE = settings.get_ui_label_mobile_phone() EMAIL =", "# Nothing to do :) return # Check if we", "rss_url: # No change to either Contact Name or URL", "= customise_pr_person_controller # ----------------------------------------------------------------------------- # Groups def chairperson(row): \"\"\" Virtual", "in HR module #settings.hrm.organisation_label = \"National Society / Branch\" settings.hrm.organisation_label", "S3LocationSelectorWidget2(levels = (\"L2\",), points = True, polygons = True, )", "to use Rich Text editor in Newsfeed settings.cms.richtext = True", "Assessments & Flexible Impact Assessments\", restricted = True, module_type =", "Receives Alerts via Email & SMS\", restricted = True, #", "if not current.auth.user: # Don't include prepop return False if", "field.requires = IS_LOCATION_SELECTOR2(levels=levels) field.widget = S3LocationSelectorWidget2(levels=levels, hide_lx=False, reverse_lx=True, show_address=True, show_postcode=True,", "= True, # module_type = 10, # )), (\"req\", Storage(", "return else: # Update the URL 
name_exists.update_record(url=rss_url) if no_import: if", "= \"%m-%d-%Y\" # Start week on Sunday settings.L10n.firstDOW = 0", "restricted = True, # module_type = 10, # )), #", "# @ToDo: Port these Assessments to the Survey module #(\"building\",", "= 4 )), # All modules below here should be", "Function to decide which Marker to use for Facilities Map", "table = s3db.msg_rss_channel old = current.db(table.url == old_rss).select(table.channel_id, table.enabled, limitby", "This item is not shown in the menu )), (\"admin\",", "= T(\"Home\"), restricted = False, # Use ACLs to control", "settings.auth.record_approval_required_for = (\"org_organisation\",) # ----------------------------------------------------------------------------- # Audit def audit_write(method, tablename,", "T(\"Synchronization\"), #description = \"Synchronization\", restricted = True, access = \"|1|\",", "access = None, # All Users (inc Anonymous) can see", "empty list => disabled for all (including Admin) #settings.org.dependent_fields =", "gluon import current from gluon.html import A, URL from gluon.storage", "Matches against Inventories where supplies are requested.\", restricted = True,", "Send and Receive settings.inv.shipment_types = { 1: T(\"Other Warehouse\") }", "= \"name\", options=\"Data\" ) ), S3SQLInlineComponent( \"contact\", name = \"twitter\",", "(\"create\", \"update\"): field.label = \"\" # Gets replaced by widget", "field.label = \"\" # Gets replaced by widget #field.requires =", "by by group name, description or comments and by network", "label = T(\"Photo\"), # multiple = False, # fields =", "Organisation when a user registers settings.auth.registration_requests_organisation = True # Uncomment", "in preferential order if \"Hub\" in types: marker = \"warehouse\"", "(\"org\", Storage( name_nice = T(\"Locations\"), #description = 'Lists \"who is", "Assets\", restricted = True, module_type = 10, )), # Vehicle", "re-do list_fields as get over_written by hrm_group_controller() list_fields = [(T(\"Network\"),", "\"acronym\"], label = T(\"Name\"), _class = \"filter-search\", ), S3OptionsFilter(\"group_membership.group_id\", label", "NB This is labelled 'Role' in DRRPP ], filterby =", "& \\ (mtable.person_id == ptable.id) chair = db(query).select(ptable.first_name, ptable.middle_name, ptable.last_name,", "= 10 # )), # @ToDo: Port these Assessments to", "list_fields as get over_written by hrm_group_controller() list_fields = [(T(\"Network\"), \"group_team.org_group_id\"),", "row.date_required if date_required: date = rtable.date_required.represent(date_required) title = \"%(priority)s by", "users need to verify their email address? 
settings.auth.registration_requires_verification = True", "# -*- coding: utf-8 -*- try: # Python 2.7 from", "= current.response.s3 # Tell the client to request per-feature markers", "channel (& associated parsers) s3db.msg_channel_enable(\"msg_rss_channel\", name_exists.channel_id) return # Check if", "= 10 )), #(\"proc\", Storage( # name_nice = T(\"Procurement\"), #", "# ----------------------------------------------------------------------------- # Groups def chairperson(row): \"\"\" Virtual Field to", "in Newsfeed settings.cms.person = \"person_id\" # Uncomment to use Rich", "# ----------------------------------------------------------------------------- def req_req_postprocess(form): \"\"\" Runs after crud_form completes -", "module_type = 10 # )), #(\"dvr\", Storage( # name_nice =", "= T(\"Ticket Viewer\"), #description = \"Needed for Breadcrumbs\", restricted =", "manage surveys.\", restricted = True, module_type = 5, )), #(\"cr\",", "label = T(\"Neighborhood\"), levels = (\"L3\", \"L4\"), #hidden = True,", "# \"pr_person_details.mother_name\" : [], # \"pr_person_details.father_name\" : [], # \"pr_person_details.company\"", "import S3AddResourceLink s3db.pr_group_membership.person_id.comment = \\ S3AddResourceLink(c=\"pr\", f=\"person\", title=T(\"Create Person\"), tooltip=current.messages.AUTOCOMPLETE_HELP)", "[\"id\", \"name\", \"code\", \"organisation_id\", \"start_date\", \"end_date\", (T(\"Locations\"), \"location.location_id\"), ] s3db.configure(tablename,", "= False, fields = [(\"\", \"value\")], filterby = dict(field =", "be accessed from other modules. module_type = None, )), (\"supply\",", "settings.hrm.use_certificates = False # Uncomment to disable the use of", "the profile page settings.cms.location_click_filters = True # Uncomment to use", "return else: # Check if we already have a channel", "= T(\"Services\"), field = \"service_id\", # activate hierarchical org_service: #leafonly", "organisation_id instead of created_by in Newsfeed settings.cms.organisation = \"post_organisation.organisation_id\" #", "T(\"Address\"), multiple = False, # This is just Text -", "s3db.cms_post _id = ptable.insert(series_id=series_id, title=title, body=body, location_id=location_id, person_id=row.requester_id, ) record", "5, )), (\"event\", Storage( name_nice = T(\"Events\"), #description = \"Activate", "label = T(\"Network\"), represent = \"%(name)s\", #hidden = True, ),", "gluon.html import DIV, INPUT from s3 import S3SQLCustomForm, S3SQLInlineComponent if", "req/create form # Hide most Fields from s3 import S3SQLCustomForm,", "partner organizations settings.project.multiple_organisations = True def customise_project_project_controller(**attr): s3 = current.response.s3", "in (\"create\", \"update\"): get_vars = r.get_vars # Context from a", "r.interactive or r.representation == \"aadata\": if not r.component: from s3", "\"%(name)s\", #hidden = True, ), S3LocationFilter(\"organisation_location.location_id\", label = T(\"Neighborhood\"), levels", "label = T(\"Name\"), _class = \"filter-search\", ), S3OptionsFilter(\"group_membership.group_id\", label =", "# Check Now async = current.s3task.async async(\"msg_poll\", args=[\"msg_rss_channel\", channel_id]) async(\"msg_parse\",", "to request the Site when a user registers #settings.auth.registration_requests_site =", "# ----------------------------------------------------------------------------- # Requests Management settings.req.req_type = [\"People\", \"Stock\"]#, \"Summary\"]", "\"profile\", # options=[True] # ) # ), ] list_fields =", 
"[\"name\", \"mission\", \"website\", \"meetings\", ] s3db.configure(\"org_group\", list_fields = list_fields, )", "\"comments\", ] s3db.configure(\"pr_group\", crud_form = crud_form, filter_widgets = filter_widgets, list_fields", "= T(\"Shelters\"), # #description = \"Tracks the location, capacity and", "Uncomment to enable Summary 'Site Needs' tab for Offices/Facilities settings.req.summary", "& Purchasing of Goods & Services\", # restricted = True,", "'''$.filterOptionsS3({ 'trigger':'organisation_id', 'target':'site_id', 'lookupResource':'site', 'lookupURL':'/%s/org/sites_for_org/', 'optional':true })''' % r.application s3.jquery_ready.append(script)", "to do :) return # Check if we already have", "Customise pr_group resource (in group & org_group controllers) - runs", "= [(\"\", \"group_id\")], multiple = False, ), \"job_title_id\", \"start_date\", )", "or other resources. Matches against Inventories where supplies are requested.\",", "to request the Organisation when a user registers settings.auth.registration_requests_organisation =", "= customise_hrm_job_title_controller # ----------------------------------------------------------------------------- # Projects # Use codes for", "-*- try: # Python 2.7 from collections import OrderedDict except:", "Assessments\", # restricted = True, # module_type = 10, #", "no_import: if name_exists.enabled: # Disable channel (& associated parsers) s3db.msg_channel_disable(\"msg_rss_channel\",", "hierarchical org_service: #from s3 import S3LocationFilter, S3OptionsFilter, S3TextFilter, S3HierarchyFilter filter_widgets", "for Breadcrumbs\", restricted = False, module_type = None # No", "from s3 import S3SQLCustomForm, S3SQLInlineComponent if r.method != \"read\": from", "= T(\"Network\"), #hidden = True, ), ] # Need to", "settings.hrm.email_required = False # Uncomment to allow Staff & Volunteers", "# Python 2.6 from gluon.contrib.simplejson.ordered_dict import OrderedDict from gluon import", "= row.date_required if date_required: date = rtable.date_required.represent(date_required) title = \"%(priority)s", "of HR Skills #settings.hrm.use_skills = False # Uncomment to disable", ") list_fields = [\"id\", \"person_id\", \"job_title_id\", \"organisation_id\", (T(\"Network\"), \"group_person.group_id\"), (T(\"Groups\"),", "r.table tablename = \"project_project\" table.code.label = T(\"Project blurb (max. 
100", "= table.meetings.writable = True # Increase size of widget from", "in NYC) settings.project.codes = True # Uncomment this to use", "T(\"Email\"), multiple = False, fields = [(\"\", \"value\")], filterby =", "URL(c=\"hrm\", f=\"group\", args=[\"[id]\", \"group_membership\"]), ) settings.customise_pr_group_resource = customise_pr_group_resource # -----------------------------------------------------------------------------", "db = current.db name = form_vars.name table = s3db.msg_rss_channel name_exists", "get over_written by hrm_group_controller() list_fields = [(T(\"Network\"), \"group_team.org_group_id\"), \"name\", \"description\",", "False settings.req.requester_optional = True settings.req.date_writable = False settings.req.item_quantities_writable = True", "Call standard prep if callable(standard_prep): result = standard_prep(r) if not", "= T(\"RSS\"), multiple = False, fields = [(\"\", \"value\"), #(T(\"Don't", "s3db = current.s3db table = s3db.pr_group field = table.group_type field.default", "s3db.configure(\"hrm_human_resource\", crud_form = crud_form, list_fields = list_fields, ) settings.customise_hrm_human_resource_resource =", "separately for the menu )), (\"appadmin\", Storage( name_nice = T(\"Administration\"),", "= \",\" # Default Country Code for telephone numbers settings.L10n.default_country_code", "T(\"Description\") table.meetings.readable = table.meetings.writable = True # Increase size of", "hide_time = True, #hidden = True, ), ] list_fields =", "a new group has been created create_next = URL(c=\"hrm\", f=\"group\",", "elif \"Relief Site\" in types: marker = \"asset\" elif \"Residential", "# )), # @ToDo: Port these Assessments to the Survey", "#description = \"Central point to record details on People\", restricted", "custom_prep return attr settings.customise_project_project_controller = customise_project_project_controller # ----------------------------------------------------------------------------- # Requests", "), T(\"Don't Import Feed\")), name = \"rss\", label = T(\"RSS\"),", "(EMAIL, \"email.value\"), ] s3_sql_custom_fields.insert(3, S3SQLInlineComponent( \"contact\", name = \"phone\", label", "where\". 
Allows relief agencies to coordinate their activities', restricted =", "Read the full record row = db(rtable.id == req_id).select(rtable.type, rtable.site_id,", "Use a hierarchical dropdown instead of AC field.widget = None", "marker mtable = db.gis_marker try: marker = db(mtable.name == marker).select(mtable.image,", "Warehouse\") } settings.inv.send_types = { #21: T(\"Distribution\") } settings.inv.send_type_default =", "settings.L10n.date_format = \"%m-%d-%Y\" # Start week on Sunday settings.L10n.firstDOW =", "s3db.configure(\"org_group\", list_fields = list_fields, ) if r.interactive: from gluon.html import", "# This item is handled separately for the menu )),", "get automatically #settings.auth.registration_roles = { 0: [\"comms_dispatch\"]} #settings.auth.registration_link_user_to = {\"staff\":T(\"Staff\"),", "can search by by group name, description or comments and", "s3.prep = custom_prep # Custom postp standard_postp = s3.postp def", "Uncomment this to request the Site when a user registers", "HR Skills #settings.hrm.use_skills = False # Uncomment to disable the", "facility_marker_fn(record): \"\"\" Function to decide which Marker to use for", "== \"office\").select(mtable.image, mtable.height, mtable.width, cache=s3db.cache, limitby=(0, 1) ).first() return marker", "skills = db(rstable.req_id == req_id).select(rstable.skill_id, rstable.quantity) skill_represent = s3db.hrm_multi_skill_represent for", "= T(\"Assets\"), #description = \"Recording and Assigning Assets\", restricted =", "#label = T(\"Service\"), #hidden = True, ), # activate hierarchical", "filter_widgets, ) field = r.table.site_id # Don't assume that user", "handled separately for the menu )), # Uncomment to enable", "parent #settings.gis.check_within_parent_boundaries = False # GeoNames username settings.gis.geonames_username = \"eden_nyc\"", "links in IFrames open a full page in a new", "None else: # Create form: Default rss_import = None crud_form", "cannot set rss_import correctly\" % r.component_id) # Default rss_import =", "until tested settings.ui.datatables_responsive = False # PDF to Letter settings.base.paper_size", "in Shelters\", # restricted = True, # module_type = 10", "options = \"FACEBOOK\" ) ), \"comments\", postprocess = pr_contact_postprocess, )", "and old.enabled: s3db.msg_channel_disable(\"msg_rss_channel\", old.channel_id) return else: # Nothing to do", "2.6 from gluon.contrib.simplejson.ordered_dict import OrderedDict from gluon import current from", "s3db.pr_group_membership.person_id.comment = \\ S3AddResourceLink(c=\"pr\", f=\"person\", title=T(\"Create Person\"), tooltip=current.messages.AUTOCOMPLETE_HELP) #else: #", "s3db.msg_channel_enable(\"msg_rss_channel\", name_exists.channel_id) return # Check if we already have a", "= db(table.name == name).select(table.id, table.channel_id, table.enabled, table.url, limitby = (0,", "settings.req.item_quantities_writable = True settings.req.skill_quantities_writable = True settings.req.items_ask_purpose = False #settings.req.use_req_number", "# Extra fields to search in Autocompletes & display in", "\"site_id\", ] if r.method in (\"create\", \"update\"): get_vars = r.get_vars", "the Street Address \"\"\" form_vars = form.vars name = form_vars.get(\"name\",", "Parser table = s3db.msg_parser _id = table.insert(channel_id=channel_id, function_name=\"parse_rss\", enabled=True) s3db.msg_parser_enable(_id)", "checking that LatLons are within boundaries of their parent #settings.gis.check_within_parent_boundaries", "HTML is OK return A(s3_fullname(chair), 
_href=URL(c=\"hrm\", f=\"person\", args=chair.id)) else: return", "Post ptable = s3db.cms_post _id = ptable.insert(series_id=series_id, title=title, body=body, location_id=location_id,", "True, module_type = 5, )), #(\"cr\", Storage( # name_nice =", "True, ), ] # Need to re-do list_fields as get", "should go into location_id$addr_street fields = [(\"\", \"comments\")], ), S3SQLInlineComponentMultiSelectWidget(", "Goods & Services\", # restricted = True, # module_type =", "1st (admin) user is # registered in order to secure", "# module_type = 10, # )), (\"req\", Storage( name_nice =", "(T(\"Type\"), \"organisation_organisation_type.organisation_type_id\"), (T(\"Services\"), \"service.name\"), \"phone\", (T(\"Email\"), \"email.value\"), \"website\" #(T(\"Neighborhoods Served\"),", "return current.messages[\"NONE\"] # ----------------------------------------------------------------------------- def customise_pr_group_controller(**attr): s3 = current.response.s3 #", "separator for numbers (defaults to ,) settings.L10n.decimal_separator = \".\" #", "= S3MultiSelectWidget(multiple=False) if r.get_vars.get(\"format\", None) == \"popup\": # Coming from", "(0, 1) ).first() if old and old.enabled: s3db.msg_channel_disable(\"msg_rss_channel\", old.channel_id) return", "= \"2\" ) ), S3SQLInlineComponent( \"document\", name = \"media\", label", "True # Uncomment to adjust filters in Newsfeed when clicking", "a channel for this Contact db = current.db name =", "of Organisation Groups settings.org.groups = \"Network\" # Make Services Hierarchical", "# e.g. Activities filtered to those of parent Project settings.gis.countries", "to show Tags in Newsfeed settings.cms.show_tags = True # Uncomment", "True, module_type = 5, )), (\"event\", Storage( name_nice = T(\"Events\"),", "= row.pr_group try: group_id = row.id except: # not available", "T(\"Procurement\"), # #description = \"Ordering & Purchasing of Goods &", "they create field.default = None # Use a hierarchical dropdown", "module_type = None, )), (\"msg\", Storage( name_nice = T(\"Messaging\"), #description", "[ S3TextFilter([\"name\", \"description\", \"comments\", \"group_team.org_group_id$name\", ], label = T(\"Search\"), comment", "# Use a hierarchical dropdown instead of AC field.widget =", "# Don't assume that user is from same org/site as", "def customise_project_project_controller(**attr): s3 = current.response.s3 # Custom prep standard_prep =", "\"organisation\", name = \"partner\", label = T(\"Partner Organizations\"), fields =", "settings.project.sectors = False # Multiple partner organizations settings.project.multiple_organisations = True", "False, ), \"job_title_id\", \"start_date\", ) list_fields = [\"id\", \"person_id\", \"job_title_id\",", "_class = \"filter-search\", ), S3OptionsFilter(\"group_membership.group_id\", label = T(\"Network\"), represent =", "list_fields = list_fields, ) elif r.component_name == \"organisation\": # Add", "settings.customise_pr_group_controller = customise_pr_group_controller # ----------------------------------------------------------------------------- def customise_pr_group_resource(r, tablename): \"\"\" Customise", "# \"pr_person_details.father_name\" : [], # \"pr_person_details.company\" : [], # \"pr_person_details.affiliations\"", "channel_id) # Setup Parser table = s3db.msg_parser _id = table.insert(channel_id=channel_id,", "for Offices/Facilities settings.req.summary = True # ----------------------------------------------------------------------------- def req_req_postprocess(form): \"\"\"", 
"address settings.hrm.email_required = False # Uncomment to allow Staff &", "= True settings.req.items_ask_purpose = False #settings.req.use_req_number = False # Label", "T(\"Administration\"), #description = \"Site Administration\", restricted = True, module_type =", "= (\"organisation_id$name\", \"location_id$addr_street\", ) # Uncomment to hide inv &", "(\"create\", \"update\"): script = \\ '''$('#req_req_site_id').change(function(){ var url=$('#person_add').attr('href') url=url.split('?') var", "on Assets #(\"vehicle\", Storage( # name_nice = T(\"Vehicles\"), # #description", "(T(\"Email\"), \"email.value\"), \"website\" #(T(\"Neighborhoods Served\"), \"location.name\"), ] s3db.configure(\"org_organisation\", crud_form =", "= dict(id=_id) s3db.update_super(ptable, record) # Add source link url =", "# Create Post ptable = s3db.cms_post _id = ptable.insert(series_id=series_id, title=title,", "= True s3db = current.s3db #if r.method == \"validate\": #", "parsers) s3db.msg_channel_enable(\"msg_rss_channel\", url_exists.channel_id) return else: # Update the URL name_exists.update_record(url=rss_url)", "form_vars.get(\"name\", None) if name: return address = form_vars.get(\"address\", None) if", ")), (\"doc\", Storage( name_nice = T(\"Documents\"), #description = \"A library", "= True # Uncomment to use have Filter form in", "settings.customise_hrm_human_resource_resource = customise_hrm_human_resource_resource # ----------------------------------------------------------------------------- def customise_hrm_job_title_controller(**attr): s3 = current.response.s3", "= False hr_fields.remove(\"organisation_id\") site_id = get_vars.get(\"(site)\", None) if site_id: field", "{ 0: [\"comms_dispatch\"]} #settings.auth.registration_link_user_to = {\"staff\":T(\"Staff\"), # #\"volunteer\":T(\"Volunteer\") # }", "table.location_id field.label = \"\" # Gets replaced by widget #field.requires", "OrderedDict except: # Python 2.6 from gluon.contrib.simplejson.ordered_dict import OrderedDict from", "# This is just Text - put into the Comments", "dict(priority=priority, date=date) else: title = priority body = row.comments if", "#settings.org.site_inv_req_tabs = True # ----------------------------------------------------------------------------- def facility_marker_fn(record): \"\"\" Function to", "] s3_sql_custom_fields.insert(3, S3SQLInlineComponent( \"contact\", name = \"phone\", label = MOBILE,", "S3SQLInlineComponent, S3SQLInlineComponentMultiSelectWidget s3db = current.s3db if r.tablename == \"org_organisation\": if", "S3AddResourceLink s3db.pr_group_membership.person_id.comment = \\ S3AddResourceLink(c=\"pr\", f=\"person\", title=T(\"Create Person\"), tooltip=current.messages.AUTOCOMPLETE_HELP) #else:", "Don't include Email/Phone for unauthenticated users if current.auth.is_logged_in(): MOBILE =", "[(MOBILE, \"phone.value\"), (EMAIL, \"email.value\"), ] s3_sql_custom_fields.insert(3, S3SQLInlineComponent( \"contact\", name =", "the label for 'Staff' settings.hrm.staff_label = \"Contacts\" # Uncomment to", "HR Credentials settings.hrm.use_credentials = False # Uncomment to enable the", "= False # Default timezone for users settings.L10n.utc_offset = \"UTC", "the Request details priority = rtable.priority.represent(row.priority) date_required = row.date_required if", "attr settings.customise_pr_person_controller = customise_pr_person_controller # ----------------------------------------------------------------------------- # Groups def chairperson(row):", "\"residence\" #elif \"Shelter\" in types: # 
marker = \"shelter\" else:", "limitby=(0, 1)).first() if chair: # Only used in list view", "= db.gis_marker try: marker = db(mtable.name == marker).select(mtable.image, mtable.height, mtable.width,", "= T(\"You can search by by group name, description or", "\"group_team.org_group_id\"), \"name\", \"description\", \"meetings\", (T(\"Chairperson\"), \"chairperson\"), \"comments\", ] s3db.configure(\"pr_group\", list_fields", "table.insert(name=name, enabled=True, url=rss_url) record = dict(id=_id) s3db.update_super(table, record) # Enable", "\"aadata\"): from s3 import S3SQLCustomForm, S3SQLInlineComponent, S3SQLInlineComponentCheckbox s3db = current.s3db", "2: # Medium marker = \"%s_yellow\" % marker elif reqs", "= \"facebook\", label = T(\"Facebook\"), multiple = False, fields =", "(ctable.deleted == False) rss = db(query).select(ctable.poll, limitby=(0, 1) ).first() if", "name = \"iCal\", label = \"iCAL\", multiple = False, fields", "T(\"How people can help\") script = '''$('#project_project_code').attr('maxlength','100')''' s3.jquery_ready.append(script) crud_form =", "dict(field = \"profile\", # options=[True] # ) # ), ]", "module_type = 5, )), #(\"cr\", Storage( # name_nice = T(\"Shelters\"),", "dict(child=\"site_id\"), title=T(\"Create Facility\"), tooltip=current.messages.AUTOCOMPLETE_HELP) current.response.s3.req_req_postprocess = req_req_postprocess if not r.component", "old_rss).select(table.channel_id, table.enabled, limitby = (0, 1) ).first() if old and", "approver of a new (verified) user, even if the user", "in (None, \"create\", \"update\"): from s3 import IS_LOCATION_SELECTOR2, S3LocationSelectorWidget2, S3MultiSelectWidget", "URL if no_import: if name_exists.enabled: # Disable channel (& associated", "----------------------------------------------------------------------------- # Comment/uncomment modules here to disable/enable them settings.modules =", "fundraising, website, social media, etc.\"), fields = [\"document_id\", \"name\", \"url\",", "- @ToDo: Send out Tweets \"\"\" req_id = form.vars.id db", "url = \"%s%s\" % (settings.get_base_public_url(), URL(c=\"req\", f=\"req\", args=req_id)) s3db.doc_document.insert(doc_id=record[\"doc_id\"], url=url,", "(r.interactive or r.representation == \"aadata\"): from s3 import S3SQLCustomForm, S3SQLInlineComponent,", "# Enable channel (& associated parsers) s3db.msg_channel_enable(\"msg_rss_channel\", name_exists.channel_id) return #", "T(\"NYC Prepared\") # Theme (folder to use for views/layout.html) settings.base.theme", "def chairperson(row): \"\"\" Virtual Field to show the chairperson of", "\"name\", options=\"Data\" ) ), S3SQLInlineComponent( \"contact\", name = \"twitter\", label", "org/site as Contacts they create field.default = None # Use", "T(\"Don't Import Feed\")), name = \"rss\", label = T(\"RSS\"), multiple", "S3LocationFilter(\"location.location_id\", label = T(\"Location\"), levels = (\"L1\", \"L2\", \"L3\", \"L4\"),", "this to change the label for 'Mobile Phone' settings.ui.label_mobile_phone =", "Medium marker = \"%s_yellow\" % marker elif reqs == 1:", "the Mobile Phone when a user registers settings.auth.registration_requests_mobile_phone = True", "this Contact # and ensure active or not as appropriate", "db(stable.name == \"Request\").select(stable.id, cache=s3db.cache, limitby=(0, 1) ).first().id except: # Prepop", "the URL name_exists.update_record(url=rss_url) if no_import: if name_exists.enabled: # Disable channel", "= 3, )), #(\"vol\", Storage( # name_nice = T(\"Volunteers\"), #", "notify the approver of 
a new (verified) user, even if", "[(T(\"Basic Details\"), None), (T(\"Contacts\"), \"human_resource\"), (T(\"Facilities\"), \"facility\"), (T(\"Projects\"), \"project\"), (T(\"Assets\"),", "\"EMAIL\")), ) crud_form = S3SQLCustomForm(*s3_sql_custom_fields) s3db.configure(r.tablename, crud_form = crud_form, list_fields", "S3SQLCustomForm( \"organisation_id\", \"name\", \"code\", \"description\", \"status_id\", \"start_date\", \"end_date\", \"calendar\", #\"drr.hfa\",", "= (0, 1) ).first() no_import = current.request.post_vars.get(\"rss_no_import\", None) if name_exists:", "label = T(\"Theme\"), # #hidden = True, # ), S3LocationFilter(\"location.location_id\",", "), S3OptionsFilter(\"group_team.org_group_id\", label = T(\"Network\"), #hidden = True, ), ]", "db = current.db gtable = db.gis_location query = (gtable.name ==", "Setup Parser table = s3db.msg_parser _id = table.insert(channel_id=channel_id, function_name=\"parse_rss\", enabled=True)", ") # ), ] list_fields = [(current.messages.ORGANISATION, \"human_resource.organisation_id\"), \"first_name\", #\"middle_name\",", "[], # \"vol_volunteer_cluster.vol_cluster_id\" : [], # \"vol_volunteer_cluster.vol_cluster_position_id\" : [], #", "source link url = \"%s%s\" % (settings.get_base_public_url(), URL(c=\"req\", f=\"req\", args=req_id))", "except: marker = db(mtable.name == \"office\").select(mtable.image, mtable.height, mtable.width, cache=s3db.cache, limitby=(0,", "= row.id except: # not available return current.messages[\"NONE\"] db =", "\"Cell Phone\" # Enable this to change the label for", "resources, such as photos, documents and reports\", restricted = True,", "if callable(standard_postp): output = standard_postp(r, output) if r.interactive and isinstance(output,", "label = T(\"Data\"), multiple = False, fields = [(\"\", \"url\")],", "#description = \"Building Safety Assessments\", # restricted = True, #", "# marker = \"shelter\" else: # Unknown marker = \"office\"", "\\ (ctable.deleted == False) rss = current.db(query).select(ctable.poll, limitby=(0, 1) ).first()", "limitby=(0, 1) ).first() if rss and not rss.poll: # Remember", "multiple = False, fields = [(\"\", \"url\")], filterby = dict(field", "# module_type = 10 # )), (\"asset\", Storage( name_nice =", "(in facility, human_resource, organisation & person controllers) - runs after", "\\ (mtable.group_head == True) & \\ (mtable.person_id == ptable.id) chair", "def pr_contact_postprocess(form): \"\"\" Import Organisation/Network RSS Feeds \"\"\" s3db =", "chage the label for 'Staff' settings.hrm.staff_label = \"Contacts\" # Uncomment", "settings.cms.organisation = \"post_organisation.organisation_id\" # Uncomment to use org_group_id in Newsfeed", "31-0) # Decimal separator for numbers (defaults to ,) settings.L10n.decimal_separator", "= True, module_type = 10, )), (\"doc\", Storage( name_nice =", "True if r.interactive or r.representation == \"aadata\": if not r.component:", "False settings.security.audit_write = audit_write # ----------------------------------------------------------------------------- # CMS # Uncomment", "it's main purpose is to be accessed from other modules.", "if we already have a channel for this URL url_exists", "default menu (access to controller is possible to all still)", "#_class = \"filter-search\", ), S3OptionsFilter(\"group_team.org_group_id\", label = T(\"Network\"), #hidden =", "org_group_id in Newsfeed settings.cms.organisation_group = \"post_organisation_group.group_id\" # Uncomment to use", "= T(\"Vehicles\"), # #description = \"Manage 
Vehicles\", # restricted =", "1 settings.inv.item_status = { #0: current.messages[\"NONE\"], #1: T(\"Dump\"), #2: T(\"Sale\"),", "result = True if r.interactive: if r.component_name == \"facility\": if", "label = T(\"Categories\"), field = \"activity_type_id\", cols = 3, #", "types: marker = \"hospital\" elif \"Food\" in types: marker =", "label of \"Teams\" to \"Groups\" settings.hrm.teams = \"Groups\" # Custom", "table.insert(channel_id=channel_id, function_name=\"parse_rss\", enabled=True) s3db.msg_parser_enable(_id) # Check Now async = current.s3task.async", "restricted = False, # module_type = 10, # )), #(\"member\",", "= get_vars.get(\"(site)\", None) if site_id: field = s3db.hrm_human_resource.site_id field.default =", "= \"\" # Gets replaced by widget #field.requires = IS_LOCATION_SELECTOR2(levels", "10 )), (\"org\", Storage( name_nice = T(\"Locations\"), #description = 'Lists", "S3DateFilter(\"end_date\", label = T(\"End Date\"), hide_time = True, #hidden =", "100 table.comments.label = T(\"How people can help\") script = '''$('#project_project_code').attr('maxlength','100')'''", "= s3db.req_req_skill skills = db(rstable.req_id == req_id).select(rstable.skill_id, rstable.quantity) skill_represent =", "False, fields = hr_fields, ), #S3SQLInlineComponent( # \"image\", # name", "= \"Support Requests\", # restricted = True, # module_type =", "db = current.db mtable = current.s3db.pr_group_membership ptable = db.pr_person query", "need a simpler (but less accountable) process for managing stock", "\"Organization\" # ----------------------------------------------------------------------------- def customise_hrm_human_resource_controller(**attr): s3 = current.response.s3 # Custom", "elif name_exists.enabled: # Nothing to do :) return else: #", "False, #widget = \"hierarchy\", ), S3SQLInlineComponent( \"group_membership\", label = T(\"Network\"),", "s3db.configure(\"pr_group\", crud_form = crud_form, filter_widgets = filter_widgets, list_fields = list_fields,", "from gluon.html import DIV, INPUT from s3 import S3MultiSelectWidget, S3SQLCustomForm,", "manhattan = db(query).select(gtable.id, limitby=(0, 1)).first() if manhattan: field.default = manhattan.id", "# ----------------------------------------------------------------------------- def customise_pr_group_resource(r, tablename): \"\"\" Customise pr_group resource (in", "of datatables # - Disabled until tested settings.ui.datatables_responsive = False", "r.get_vars.get(\"site_facility_type.facility_type_id__belongs\", None) if not types: # Hide Private Residences from", "created create_next = URL(c=\"hrm\", f=\"group\", args=[\"[id]\", \"group_membership\"]), ) settings.customise_pr_group_resource =", "= T(\"Volunteers\"), # #description = \"Human Resources Management\", # restricted", "space) settings.L10n.thousands_separator = \",\" # Default Country Code for telephone", "if organisation_id: field = s3db.hrm_human_resource.organisation_id field.default = organisation_id field.readable =", "= T(\"Messaging\"), #description = \"Sends & Receives Alerts via Email", "crud_form, filter_widgets = filter_widgets, list_fields = list_fields, ) settings.customise_org_organisation_resource =", "Send out Tweets \"\"\" req_id = form.vars.id db = current.db", "attr[\"native\"] = True return attr settings.customise_org_group_controller = customise_org_group_controller # -----------------------------------------------------------------------------", "True, module_type = 4 )), # All modules below here", "= T(\"Email\") list_fields += [(MOBILE, 
\"phone.value\"), (EMAIL, \"email.value\"), ] s3_sql_custom_fields.insert(3,", "in the newswire - @ToDo: Send out Tweets \"\"\" req_id", "hidden = True, ), S3OptionsFilter(\"group_membership.group_id\", label = T(\"Team\"), filter =", "channel for this URL url_exists = db(table.url == rss_url).select(table.id, table.channel_id,", "= S3Represent(lookup=\"org_group\", show_link=True) crud_form = S3SQLCustomForm(\"name\", \"description\", S3SQLInlineComponent(\"group_team\", label =", ") return True s3.prep = custom_prep return attr settings.customise_org_facility_controller =", "crud_form = S3SQLCustomForm( \"name\", \"location_id\", \"mission\", S3SQLInlineComponent( \"contact\", name =", "them settings.modules = OrderedDict([ # Core modules which shouldn't be", "completes - creates a cms_post in the newswire - @ToDo:", "'''$('#req_req_site_id').change(function(){ var url=$('#person_add').attr('href') url=url.split('?') var q=S3.queryString.parse(url[1]) q['(site)']=$(this).val() url=url[0]+'?'+S3.queryString.stringify(q) $('#person_add').attr('href',url)})''' current.response.s3.jquery_ready.append(script)", "don't want inconsistent across tabs # s3db.pr_group_membership.group_head.label = T(\"Chairperson\") return", "(\"msg\", Storage( name_nice = T(\"Messaging\"), #description = \"Sends & Receives", "# fields = [(\"\", \"image\")], # filterby = dict(field =", "elif \"item\" in output and hasattr(output[\"item\"], \"add_class\"): output[\"item\"].add_class(\"pr_person\") return output", "= True, # module_type = 10 # )), #(\"dvr\", Storage(", "for the URL # Disable the old Contact one and", "(\"project\", Storage( name_nice = T(\"Projects\"), #description = \"Tracking of Projects,", "\"NYC\" # Uncomment to Hide the language toolbar settings.L10n.display_toolbar =", "return A(s3_fullname(chair), _href=URL(c=\"hrm\", f=\"person\", args=chair.id)) else: return current.messages[\"NONE\"] # -----------------------------------------------------------------------------", "= \"\", hidden = True, ), S3OptionsFilter(\"group_person.group_id\", label = T(\"Network\"),", "Newsfeed settings.cms.person = \"person_id\" # Uncomment to use Rich Text", "# label = T(\"Photo\"), # multiple = False, # fields", "= \"image\", # label = T(\"Photo\"), # multiple = False,", "in output and hasattr(output[\"item\"], \"add_class\"): output[\"item\"].add_class(\"pr_person\") return output s3.postp =", "the use of HR Credentials settings.hrm.use_credentials = False # Uncomment", "Fields list_fields = s3db.get_config(\"org_organisation\", \"list_fields\") list_fields.insert(1, \"group_membership.status_id\") return result s3.prep", "and Receive settings.inv.shipment_types = { 1: T(\"Other Warehouse\") } settings.inv.send_types", "\"name\", \"description\", \"meetings\", (T(\"Chairperson\"), \"chairperson\"), \"comments\", ] s3db.configure(\"pr_group\", crud_form =", "for row in rows] # Use Marker in preferential order", "IS_EMPTY_OR from s3 import IS_LOCATION_SELECTOR2, S3LocationSelectorWidget2 field = table.location_id field.label", "pack_represent = s3db.supply_item_pack_represent for item in items: item = \"%s", "to handle Start & End in 1! 
S3DateFilter(\"start_date\", label =", "= \"contact_method\", options = \"FACEBOOK\" ) ), \"comments\", postprocess =", "standard prep if callable(standard_prep): result = standard_prep(r) else: result =", "[(\"\", \"value\")], filterby = dict(field = \"contact_method\", options = \"SMS\")),", "True else: # Don't Audit non user-visible resources return False", "version, but don't want inconsistent across tabs # s3db.pr_group_membership.group_head.label =", "import s3_comments_widget table.description.widget = s3_comments_widget from gluon import Field table.chairperson", ")), # @ToDo: Port these Assessments to the Survey module", "----------------------------------------------------------------------------- # Groups def chairperson(row): \"\"\" Virtual Field to show", "#settings.req.use_commit = False settings.req.requester_optional = True settings.req.date_writable = False settings.req.item_quantities_writable", "settings.cms.show_links = True # Uncomment to show Tags in Newsfeed", "r.component and r.method in (\"create\", \"update\"): script = \\ '''$('#req_req_site_id').change(function(){", "= MOBILE, multiple = False, fields = [(\"\", \"value\")], filterby", "= [(\"\", \"comments\")], ), S3SQLInlineComponentMultiSelectWidget( \"location\", label = T(\"Neighborhoods Served\"),", "Default timezone for users settings.L10n.utc_offset = \"UTC -0500\" # Uncomment", "name = \"rss\", label = T(\"RSS\"), multiple = False, fields", "label = T(\"Address\"), multiple = False, # This is just", "True # Label for Inventory Requests settings.req.type_inv_label = \"Supplies\" #", "field.default = organisation_id field.readable = field.writable = False hr_fields.remove(\"organisation_id\") site_id", "user registers settings.auth.registration_requests_organisation = True # Uncomment this to request", "been created create_next = URL(c=\"hrm\", f=\"group\", args=[\"[id]\", \"group_membership\"]), ) settings.customise_pr_group_resource", "restricted = True, # module_type = None # This item", "go into location_id$addr_street fields = [(\"\", \"comments\")], ), S3SQLInlineComponentMultiSelectWidget( \"location\",", "\"EMAIL\" ) ), \"website\", S3SQLInlineComponent( \"contact\", comment = DIV(INPUT(_type=\"checkbox\", _name=\"rss_no_import\",", "_class = \"filter-search\", ), S3OptionsFilter(\"status_id\", label = T(\"Status\"), # Not", "3, ), #S3OptionsFilter(\"theme_project.theme_id\", # label = T(\"Theme\"), # #hidden =", "\"pr_person_details.affiliations\" : [], # \"vol_volunteer.active\" : [], # \"vol_volunteer_cluster.vol_cluster_type_id\" :", "ptable.insert(series_id=series_id, title=title, body=body, location_id=location_id, person_id=row.requester_id, ) record = dict(id=_id) s3db.update_super(ptable,", "Location is that of the site otable = s3db.org_site location_id", "site_id = get_vars.get(\"(site)\", None) if site_id: field = s3db.hrm_human_resource.site_id field.default", "s3_comments_widget table.description.widget = s3_comments_widget from gluon import Field table.chairperson =", "\"item\" in output and hasattr(output[\"item\"], \"add_class\"): output[\"item\"].add_class(\"pr_person\") return output s3.postp", "= 1, )), (\"project\", Storage( name_nice = T(\"Projects\"), #description =", "None) if site_id: field = s3db.hrm_human_resource.site_id field.default = site_id field.readable", "to be drawn up\", # restricted = True, # module_type", "modules. 
module_type = None, )), (\"supply\", Storage( name_nice = T(\"Supply", "to coordinate their activities', restricted = True, module_type = 4", "1)).first() if manhattan: field.default = manhattan.id table.mission.readable = table.mission.writable =", "new (verified) user, even if the user is automatically approved", "] if r.method in (\"create\", \"update\"): get_vars = r.get_vars #", "True return attr settings.customise_org_group_controller = customise_org_group_controller # ----------------------------------------------------------------------------- # Persons", "s3db.pr_contact query = (ctable.pe_id == r.record.pe_id) & \\ (ctable.contact_method ==", "for the Contact & 1 for the URL # Disable", "1) ).first() return marker # ----------------------------------------------------------------------------- def org_facility_onvalidation(form): \"\"\" Default", "\"human_resource\", name = \"human_resource\", label = \"\", multiple = False,", "module_type = 10 )), (\"assess\", Storage( name_nice = T(\"Assessments\"), #description", "org_service: #from s3 import S3LocationFilter, S3OptionsFilter, S3TextFilter, S3HierarchyFilter filter_widgets =", "S3SQLInlineComponent( \"contact\", name = \"facebook\", label = T(\"Facebook\"), multiple =", "= \"Sends & Receives Alerts via Email & SMS\", restricted", "filterby = dict(field = \"name\", options=\"Data\" ) ), S3SQLInlineComponent( \"contact\",", "T(\"Team\"), filter = True, header = \"\", hidden = True,", "s3 import FS s3.filter = FS(\"site_facility_type.facility_type_id$name\") != \"Private Residence\" if", "try: # Python 2.7 from collections import OrderedDict except: #", "link the URL one to this Contact # and ensure", "same org/site as Contacts they create r.component.table.site_id.default = None return", "projects settings.project.activities = True # Uncomment this to use Milestones", "to do :) return #else: # # Create a new", "\"meetings\", ] s3db.configure(\"org_group\", list_fields = list_fields, ) if r.interactive: from", "T(\"Services\"), field = \"service_id\", # activate hierarchical org_service: #leafonly =", "= \"Location\" # Uncomment to show the date when a", "users get automatically #settings.auth.registration_roles = { 0: [\"comms_dispatch\"]} #settings.auth.registration_link_user_to =", "\"SMS\")), ) s3_sql_custom_fields.insert(3, S3SQLInlineComponent( \"contact\", name = \"email\", label =", "# Hide most Fields from s3 import S3SQLCustomForm, S3SQLInlineComponent #", "S3SQLInlineComponent( \"location\", label = T(\"Location\"), fields = [(\"\", \"location_id\")], ),", "fields = [\"document_id\", \"name\", \"url\", \"comments\", ], filterby = dict(field", "org_facility_onvalidation, ) return True s3.prep = custom_prep return attr settings.customise_org_facility_controller", "(folder to use for views/layout.html) settings.base.theme = \"NYC\" settings.ui.formstyle_row =", "False hr_fields.remove(\"organisation_id\") site_id = get_vars.get(\"(site)\", None) if site_id: field =", "None # Location is that of the site otable =", "field = \"service_id\", # activate hierarchical org_service: #leafonly = False,", "if url_exists: # We have 2 feeds: 1 for the", "S3MultiSelectWidget(multiple=False) if r.get_vars.get(\"format\", None) == \"popup\": # Coming from req/create", "use settings suitable for detailed Task management settings.project.mode_task = False", "# Create form: Default rss_import = None else: # Component", "possible to all still) module_type = 10 )), (\"org\", Storage(", "= \"EMAIL\" ) ), \"website\", S3SQLInlineComponent( 
\"contact\", comment = DIV(INPUT(_type=\"checkbox\",", "limitby=(0, 1) ).first().id except: # Prepop hasn't been run series_id", "#settings.org.dependent_fields = { \\ # \"pr_person_details.mother_name\" : [], # \"pr_person_details.father_name\"", "= \"profile\", # options=[True] # ) # ), ] list_fields", "full record row = db(rtable.id == req_id).select(rtable.type, rtable.site_id, rtable.requester_id, rtable.priority,", "settings.org.groups = \"Network\" # Make Services Hierarchical settings.org.services_hierarchical = True", "standard_prep(r) else: result = True s3db = current.s3db #if r.method", "Residences from s3 import FS s3.filter = FS(\"site_facility_type.facility_type_id$name\") != \"Private", "and distributions\", # restricted = False, # module_type = 10,", "and Sending Items\", restricted = True, module_type = 10 )),", "if r.interactive or r.representation == \"aadata\": table = current.s3db.hrm_job_title table.organisation_id.readable", "list_fields = list_fields, ) if r.interactive: from gluon.html import DIV,", "s3db = current.s3db form_vars = form.vars rss_url = form_vars.rsscontact_i_value_edit_0 or", "Autocompletes & display in Representations settings.org.site_autocomplete_fields = (\"organisation_id$name\", \"location_id$addr_street\", )", "if not types: # Hide Private Residences from s3 import", "= True settings.req.skill_quantities_writable = True settings.req.items_ask_purpose = False #settings.req.use_req_number =", "= \"contact_method\", options = \"FACEBOOK\" ) ), \"meetings\", \"comments\", postprocess", "allow Staff & Volunteers to be registered without an email", "series_id = db(stable.name == \"Request\").select(stable.id, cache=s3db.cache, limitby=(0, 1) ).first().id except:", "channel (& associated parsers) s3db.msg_channel_disable(\"msg_rss_channel\", url_exists.channel_id) return elif url_exists.enabled: #", "(\"L2\", \"L3\") field.requires = IS_LOCATION_SELECTOR2(levels=levels) field.widget = S3LocationSelectorWidget2(levels=levels, hide_lx=False, reverse_lx=True,", "date when a Site (Facilities-only for now) was last contacted", "True, ), S3OptionsFilter(\"site_id\", hidden = True, ), S3OptionsFilter(\"training.course_id\", label =", "s3 import S3LocationFilter, S3OptionsFilter, S3TextFilter, S3HierarchyFilter filter_widgets = [ S3TextFilter([\"name\",", "coding: utf-8 -*- try: # Python 2.7 from collections import", "\"\", hidden = True, ), S3LocationFilter(\"location_id\", label = T(\"Location\"), levels", "= None # Use a hierarchical dropdown instead of AC", "field.writable = False table.name.label = T(\"Name\") table.description.label = T(\"Description\") table.meetings.readable", "True # Uncomment to show Links in Newsfeed settings.cms.show_links =", "list_fields = list_fields, ) return result s3.prep = custom_prep return", "import Field table.chairperson = Field.Method(\"chairperson\", chairperson) # Format for filter_widgets", "FS s3.filter = FS(\"site_facility_type.facility_type_id$name\") != \"Private Residence\" if r.interactive: tablename", ": T(\"United States Dollars\"), } settings.L10n.languages = OrderedDict([ (\"en\", \"English\"),", "settings.L10n.default_country_code = 1 # Enable this to change the label", "async(\"msg_poll\", args=[\"msg_rss_channel\", channel_id]) async(\"msg_parse\", args=[channel_id, \"parse_rss\"]) # ----------------------------------------------------------------------------- # Human", "req_req_postprocess(form): \"\"\" Runs after crud_form completes - creates a cms_post", "(item.quantity, 
pack_represent(item.item_pack_id), item_represent(item.item_id)) body = \"%s\\n%s\" % (item, body) else:", ": [], # \"vol_volunteer_cluster.vol_cluster_position_id\" : [], # } # Uncomment", "filter_widgets = [ S3TextFilter([\"name\", \"description\", \"comments\", \"group_team.org_group_id$name\", ], label =", "False # Uncomment to disable the use of HR Credentials", "s3 import S3TextFilter, S3OptionsFilter, S3LocationFilter, S3DateFilter filter_widgets = [ S3TextFilter([\"name\",", "return output s3.postp = custom_postp return attr settings.customise_org_organisation_controller = customise_org_organisation_controller", "= audit_write # ----------------------------------------------------------------------------- # CMS # Uncomment to use", "result: return False from s3 import S3Represent, S3TextFilter, S3OptionsFilter, S3SQLCustomForm,", "= { #0: current.messages[\"NONE\"], #1: T(\"Dump\"), #2: T(\"Sale\"), #3: T(\"Reject\"),", "= { \"USD\" : T(\"United States Dollars\"), } settings.L10n.languages =", "\"create\", \"update\"): from s3 import IS_LOCATION_SELECTOR2, S3LocationSelectorWidget2, S3MultiSelectWidget field =", "= \"%(name)s\", #hidden = True, ), S3LocationFilter(\"organisation_location.location_id\", label = T(\"Neighborhood\"),", "settings.L10n.thousands_separator = \",\" # Default Country Code for telephone numbers", "out Tweets \"\"\" req_id = form.vars.id db = current.db s3db", "def custom_postp(r, output): # Call standard postp if callable(standard_postp): output", "Uncomment this to disable Sectors in projects settings.project.sectors = False", "= (ctable.pe_id == pe_id) & \\ (ctable.contact_method == \"RSS\") &", "\"asset\" elif \"Residential Building\" in types: marker = \"residence\" #elif", "= db(rstable.req_id == req_id).select(rstable.skill_id, rstable.quantity) skill_represent = s3db.hrm_multi_skill_represent for skill", "System\", restricted = True, module_type = 10, )), (\"doc\", Storage(", "name = \"phone\", label = T(\"Phone\"), multiple = False, fields", "label = T(\"Type\"), #hidden = True, ), ] list_fields =", "module_type = 5, )), (\"event\", Storage( name_nice = T(\"Events\"), #description", "documents and reports\", restricted = True, module_type = None, )),", "the menu )), (\"admin\", Storage( name_nice = T(\"Admin\"), #description =", "Mobile Phone when a user registers settings.auth.registration_requests_mobile_phone = True #", "= \"bootstrap\" settings.ui.filter_formstyle = \"table_inline\" settings.msg.parser = \"NYC\" # Uncomment", "#hidden = True, ), ] list_fields = [\"id\", \"name\", \"code\",", "registered without an email address settings.hrm.email_required = False # Uncomment", "\"English\"), (\"es\", \"Español\"), ]) # Authentication settings # These settings", "True, # ), S3OptionsFilter(\"organisation_organisation_type.organisation_type_id\", label = T(\"Type\"), #hidden = True,", "(access to controller is possible to all still) module_type =", "to enable Summary 'Site Needs' tab for Offices/Facilities settings.req.summary =", "disable the use of HR Credentials settings.hrm.use_credentials = False #", "\"phone\", label = MOBILE, multiple = False, fields = [(\"\",", "= r.table.site_id # Don't assume that user is from same", "Assessments\"), # #description = \"Building Safety Assessments\", # restricted =", "\"code\", \"description\", \"organisation.name\", \"organisation.acronym\", ], label = T(\"Name\"), _class =", "DIV(INPUT(_type=\"checkbox\", _name=\"rss_no_import\", value = rss_import, ), T(\"Don't Import Feed\")), name", 
"output[\"rheader\"] = s3db.org_rheader(r, tabs=tabs) return output s3.postp = custom_postp return", "row.id except: # not available return current.messages[\"NONE\"] db = current.db", "Function & Table ACLs # Enable this to have Open", "\"name\", \"description\", \"meetings\", (T(\"Chairperson\"), \"chairperson\"), \"comments\", ] s3db.configure(\"pr_group\", list_fields =", "name: return address = form_vars.get(\"address\", None) if address: form_vars.name =", "current.s3db rtable = s3db.req_req # Read the full record row", "on People\", restricted = True, access = \"|1|\", # Only", "(\"L2\",)) field.requires = IS_EMPTY_OR( IS_LOCATION_SELECTOR2(levels = (\"L2\",)) ) field.widget =", "True, polygons = True, ) # Default location to Manhattan", "title = \"%(priority)s by %(date)s\" % dict(priority=priority, date=date) else: title", "= S3LocationSelectorWidget2(levels = (\"L2\",), points = True, polygons = True,", "settings.L10n.display_toolbar = False # Default timezone for users settings.L10n.utc_offset =", "= \"%(name)s\", cols = 3, ), #S3OptionsFilter(\"theme_project.theme_id\", # label =", "True, ), \"name\", \"location_id\", ) s3db.configure(tablename, crud_form = crud_form, onvalidation", "Audit non user-visible resources return False settings.security.audit_write = audit_write #", "data[0][\"value\"][\"value\"] table = s3db.msg_rss_channel old = current.db(table.url == old_rss).select(table.channel_id, table.enabled,", "= S3SQLCustomForm(\"name\", \"description\", S3SQLInlineComponent(\"group_team\", label = T(\"Network\"), fields = [(\"\",", "DRRPP ], filterby = dict(field = \"role\", options = \"2\"", "T(\"Budgeting Module\"), # #description = \"Allows a Budget to be", "= \"Receiving and Sending Items\", restricted = True, module_type =", "standard_postp(r, output) if r.interactive and isinstance(output, dict): if \"rheader\" in", "of Projects, Activities and Tasks\", restricted = True, module_type =", "# empty list => disabled for all (including Admin) #settings.org.dependent_fields", "= 3 # Relief Team, to show up in hrm/group", "Geospatial Analysis\", restricted = True, module_type = 9, # 8th", "True settings.auth.record_approval_required_for = (\"org_organisation\",) # ----------------------------------------------------------------------------- # Audit def audit_write(method,", "settings.customise_hrm_human_resource_controller = customise_hrm_human_resource_controller # ----------------------------------------------------------------------------- def customise_hrm_human_resource_resource(r, tablename): \"\"\" Customise", "s3db.configure(\"org_facility\", marker_fn=facility_marker_fn) # Custom PreP standard_prep = s3.prep def custom_prep(r):", "s3db.msg_channel_disable(\"msg_rss_channel\", name_exists.channel_id) return elif name_exists.enabled: # Nothing to do :)", "\"org_organisation\": if r.id: # Update form ctable = s3db.pr_contact query", "= 10, )), (\"doc\", Storage( name_nice = T(\"Documents\"), #description =", "result s3.prep = custom_prep return attr settings.customise_hrm_job_title_controller = customise_hrm_job_title_controller #", "= True, module_type = 5, )), #(\"cr\", Storage( # name_nice", "\"location_id\", \"mission\", S3SQLInlineComponent( \"contact\", name = \"phone\", label = T(\"Phone\"),", "\"organisation_id\", \"start_date\", \"end_date\", (T(\"Locations\"), \"location.location_id\"), ] s3db.configure(tablename, crud_form = crud_form,", "req_id).select(ritable.item_id, ritable.item_pack_id, ritable.quantity) item_represent = s3db.supply_item_represent 
pack_represent = s3db.supply_item_pack_represent for", "Contact\" # Filter Requester as being from the Site settings.req.requester_from_site", "= True # Uncomment to disable Staff experience settings.hrm.staff_experience =", "by widget levels = (\"L2\", \"L3\") field.requires = IS_LOCATION_SELECTOR2(levels=levels) field.widget", "s3db.req_req_item items = db(ritable.req_id == req_id).select(ritable.item_id, ritable.item_pack_id, ritable.quantity) item_represent =", "INPUT from s3 import S3SQLCustomForm, S3SQLInlineComponent if r.method != \"read\":", "chair: # Only used in list view so HTML is", "s3db.org_group_team.org_group_id.represent = S3Represent(lookup=\"org_group\", show_link=True) crud_form = S3SQLCustomForm(\"name\", \"description\", S3SQLInlineComponent(\"group_team\", label", "restricted = True, module_type = 10 )), (\"assess\", Storage( name_nice", "= customise_org_facility_controller # ----------------------------------------------------------------------------- def customise_org_organisation_resource(r, tablename): from gluon.html import", "s3db.supply_item_represent pack_represent = s3db.supply_item_pack_represent for item in items: item =", "marker # ----------------------------------------------------------------------------- def org_facility_onvalidation(form): \"\"\" Default the name to", "reverse_lx=True, show_address=True, show_postcode=True, ) table.organisation_id.widget = S3MultiSelectWidget(multiple=False) if r.get_vars.get(\"format\", None)", "= T(\"Start Date\"), hide_time = True, #hidden = True, ),", "use of HR Description settings.hrm.use_description = False # Change the", "= 5, ), \"phone\", S3SQLInlineComponent( \"contact\", name = \"phone2\", label", "username settings.gis.geonames_username = \"eden_nyc\" # Uncomment to show created_by/modified_by using", "should disable it old_rss = data[0][\"value\"][\"value\"] table = s3db.msg_rss_channel old", "return #else: # # Create a new Feed # pass", "db = current.db s3db = current.s3db table = db.org_facility_type ltable", "T(\"Surplus\") } # ----------------------------------------------------------------------------- # Organisations # # Enable the", "= False # Uncomment to show the Organisation name in", "\"start_date\", \"end_date\", (T(\"Locations\"), \"location.location_id\"), ] s3db.configure(tablename, crud_form = crud_form, filter_widgets", "settings.L10n.firstDOW = 0 # Number formats (defaults to ISO 31-0)", ") s3db.configure(tablename, crud_form = crud_form, onvalidation = org_facility_onvalidation, ) return", "& Facilities).\", restricted = True, module_type = 10, )), (\"survey\",", "\"end_date\", \"calendar\", #\"drr.hfa\", #\"objectives\", \"human_resource_id\", # Activities S3SQLInlineComponent( \"location\", label", "field = table.location_id if r.method in (\"create\", \"update\"): field.label =", "= form.vars.id db = current.db s3db = current.s3db rtable =", "#represent = \"%(name)s\", cols = 3, ), #S3OptionsFilter(\"theme_project.theme_id\", # label", "= 2, # )), (\"cms\", Storage( name_nice = T(\"Content Management\"),", "(\"org_organisation\",) # ----------------------------------------------------------------------------- # Audit def audit_write(method, tablename, form, record,", "that user is from same org/site as Contacts they create", "& Geospatial Analysis\", restricted = True, module_type = 9, #", "= \"organisation_type_id\", label = T(\"Type\"), multiple = False, #widget =", "settings.cms.person = \"person_id\" # Uncomment to use Rich Text editor", "\"facility\": if r.method in (None, 
\"create\", \"update\"): from s3 import", "list_fields = s3db.get_config(\"org_organisation\", \"list_fields\") list_fields.insert(1, \"group_membership.status_id\") return result s3.prep =", "\"email.value\"), (settings.get_ui_label_mobile_phone(), \"phone.value\"), ] s3db.configure(\"hrm_human_resource\", crud_form = crud_form, list_fields =", "filterby = dict(field = \"contact_method\", options = \"FACEBOOK\" ) ),", "= hr_fields, ), #S3SQLInlineComponent( # \"image\", # name = \"image\",", "customise_hrm_human_resource_resource # ----------------------------------------------------------------------------- def customise_hrm_job_title_controller(**attr): s3 = current.response.s3 # Custom", "name_nice = T(\"Documents\"), #description = \"A library of digital resources,", "standard_prep = s3.prep def custom_prep(r): # Call standard prep if", "if site_id: field = s3db.hrm_human_resource.site_id field.default = site_id field.readable =", "#\"currency\", \"comments\", ) from s3 import S3TextFilter, S3OptionsFilter, S3LocationFilter, S3DateFilter", "Module\"), # #description = \"Allows a Budget to be drawn", "output) if r.interactive and isinstance(output, dict): if \"form\" in output:", "customise_org_group_controller # ----------------------------------------------------------------------------- # Persons # Uncomment to hide fields", "else: # Update the URL name_exists.update_record(url=rss_url) if no_import: if name_exists.enabled:", "activate hierarchical org_service: #S3HierarchyFilter(\"service_organisation.service_id\", # #label = T(\"Service\"), # #hidden", "= field.writable = False hr_fields.remove(\"organisation_id\") site_id = get_vars.get(\"(site)\", None) if", ").first() return marker # ----------------------------------------------------------------------------- def org_facility_onvalidation(form): \"\"\" Default the", "def customise_pr_group_resource(r, tablename): \"\"\" Customise pr_group resource (in group &", "boundaries of their parent #settings.gis.check_within_parent_boundaries = False # GeoNames username", "# } settings.auth.registration_link_user_to_default = \"staff\" settings.security.policy = 5 # Controller,", "= get_vars.get(\"(organisation)\", None) if organisation_id: field = s3db.hrm_human_resource.organisation_id field.default =", "open by default settings.cms.filter_open = True # Uncomment to adjust", "(\"asset\", Storage( name_nice = T(\"Assets\"), #description = \"Recording and Assigning", "Persons # Uncomment to hide fields in S3AddPersonWidget settings.pr.request_dob =", "= dict(field = \"profile\", # options=[True] # ) # ),", "= True # Uncomment to not track pack values settings.inv.track_pack_values", "# \"pr_person_details.affiliations\" : [], # \"vol_volunteer.active\" : [], # \"vol_volunteer_cluster.vol_cluster_type_id\"", "module_type = 10, # )), (\"req\", Storage( name_nice = T(\"Requests\"),", "# Filter Requester as being from the Site settings.req.requester_from_site =", "ritable.item_pack_id, ritable.quantity) item_represent = s3db.supply_item_represent pack_represent = s3db.supply_item_pack_represent for item", "= \"Manage Vehicles\", # restricted = True, # module_type =", "\"value\")], filterby = dict(field = \"contact_method\", options = \"TWITTER\" )", "settings.security.policy = 5 # Controller, Function & Table ACLs #", "\"document\", name = \"data\", label = T(\"Data\"), multiple = False,", "(\"pr\", Storage( name_nice = T(\"Person Registry\"), #description = \"Central point", "Society / Branch\" settings.hrm.organisation_label = 
\"Organization\" # ----------------------------------------------------------------------------- def customise_hrm_human_resource_controller(**attr):", "without input to list all.\"), #_class = \"filter-search\", ), S3OptionsFilter(\"group_team.org_group_id\",", "this to have Open links in IFrames open a full", "script = \\ '''$.filterOptionsS3({ 'trigger':'organisation_id', 'target':'site_id', 'lookupResource':'site', 'lookupURL':'/%s/org/sites_for_org/', 'optional':true })'''", "show post Titles in Newsfeed settings.cms.show_titles = True # -----------------------------------------------------------------------------", "return True s3.prep = custom_prep return attr settings.customise_org_facility_controller = customise_org_facility_controller", "filterby = dict(field = \"contact_method\", options = \"RSS\" ) ),", "Shelters\", # restricted = True, # module_type = 10 #", "for specific contexts later # e.g. Activities filtered to those", "to use person_id instead of created_by in Newsfeed settings.cms.person =", "multiple = False, ), \"meetings\", \"comments\", ) filter_widgets = [", "Assets #(\"vehicle\", Storage( # name_nice = T(\"Vehicles\"), # #description =", "settings.req.req_type = [\"People\", \"Stock\"]#, \"Summary\"] settings.req.prompt_match = False #settings.req.use_commit =", "#\"middle_name\", \"last_name\", S3SQLInlineComponent( \"human_resource\", name = \"human_resource\", label = \"\",", "# name_nice = T(\"Building Assessments\"), # #description = \"Building Safety", "#field.requires = IS_LOCATION_SELECTOR2(levels = (\"L2\",)) field.requires = IS_EMPTY_OR( IS_LOCATION_SELECTOR2(levels =", "\"comments\", ] s3db.configure(\"pr_group\", list_fields = list_fields, ) elif r.component_name ==", "r.method != \"read\": from gluon.validators import IS_EMPTY_OR from s3 import", "levels #settings.inv.direct_stock_edits = True # Uncomment to call Stock Adjustments,", "\"activity_type\", label = T(\"Categories\"), field = \"activity_type_id\", cols = 3,", "Victim Registry\"), # #description = \"Allow affected individuals & households", "module_type = 4 )), # All modules below here should", "parent Project settings.gis.countries = (\"US\",) settings.fin.currencies = { \"USD\" :", "do :) return # Check if we already have a", "restricted = True, access = \"|1|\", # Only Administrators can", "was last contacted settings.org.site_last_contacted = True # Enable certain fields", "item = \"%s %s\" % (skill.quantity, skill_represent(skill.skill_id)) body = \"%s\\n%s\"", "last contacted settings.org.site_last_contacted = True # Enable certain fields just", "s3.jquery_ready.append(script) return result s3.prep = custom_prep return attr settings.customise_hrm_human_resource_controller =", "= T(\"Documents\"), #description = \"A library of digital resources, such", "\"description\", \"meetings\", (T(\"Chairperson\"), \"chairperson\"), \"comments\", ] s3db.configure(\"pr_group\", list_fields = list_fields,", "Code for telephone numbers settings.L10n.default_country_code = 1 # Enable this", "settings should be changed _after_ the 1st (admin) user is", "\"value\")], filterby = dict(field = \"contact_method\", options = \"EMAIL\" )", "show the date when a Site (Facilities-only for now) was", "], label = T(\"Name\"), ), S3OptionsFilter(\"organisation_id\", filter = True, header", "in the menu )), (\"pr\", Storage( name_nice = T(\"Person Registry\"),", ") settings.customise_pr_group_resource = customise_pr_group_resource # ----------------------------------------------------------------------------- def 
pr_contact_postprocess(form): \"\"\" Import", "of their parent #settings.gis.check_within_parent_boundaries = False # GeoNames username settings.gis.geonames_username", "responsive behavior of datatables # - Disabled until tested settings.ui.datatables_responsive", "name_exists = db(table.name == name).select(table.id, table.channel_id, table.enabled, table.url, limitby =", "hr_fields, ), #S3SQLInlineComponent( # \"image\", # name = \"image\", #", "T(\"Contacts\"), #description = \"Human Resources Management\", restricted = True, module_type", "result s3.prep = custom_prep return attr settings.customise_hrm_human_resource_controller = customise_hrm_human_resource_controller #", "\"hierarchy\", ), S3SQLInlineComponent( \"group_membership\", label = T(\"Network\"), fields = [(\"\",", ": [], # } # Uncomment to use an Autocomplete", "T(\"Distribution\") } settings.inv.send_type_default = 1 settings.inv.item_status = { #0: current.messages[\"NONE\"],", "'Postcode' settings.ui.label_postcode = \"ZIP Code\" # Uncomment to disable responsive", "# Uncomment to disable responsive behavior of datatables # -", "= True, ), S3LocationFilter(\"location_id\", label = T(\"Location\"), levels = (\"L1\",", "= \"iCal\", label = \"iCAL\", multiple = False, fields =", "Uncomment to allow Staff & Volunteers to be registered without", "if not r.component: hr_fields = [\"organisation_id\", \"job_title_id\", \"site_id\", ] if", "S3SQLInlineComponentMultiSelectWidget s3db = current.s3db if r.tablename == \"org_organisation\": if r.id:", "Site (Facilities-only for now) was last contacted settings.org.site_last_contacted = True", "Requests Management settings.req.req_type = [\"People\", \"Stock\"]#, \"Summary\"] settings.req.prompt_match = False", "levels = (\"L3\", \"L4\"), #hidden = True, ), S3OptionsFilter(\"service_organisation.service_id\", #label", "Uncomment to disable checking that LatLons are within boundaries of", "name. You may use % as wildcard. Press 'Search' without", "== \"validate\": # # Can't validate image without the file", "MOBILE = settings.get_ui_label_mobile_phone() EMAIL = T(\"Email\") list_fields += [(MOBILE, \"phone.value\"),", "= \"UTC -0500\" # Uncomment these to use US-style dates", "\"read\": from gluon.validators import IS_EMPTY_OR from s3 import IS_LOCATION_SELECTOR2, S3LocationSelectorWidget2", "restricted = True, module_type = 4 )), # All modules", "# Enable the use of Organisation Groups settings.org.groups = \"Network\"", "current.T settings = current.deployment_settings \"\"\" Template settings for NYC Prepared", "can see this module in the default menu & access", "callable(standard_prep): result = standard_prep(r) if not result: return False from", "blurb (max. 100 characters)\") table.code.max_length = 100 table.comments.label = T(\"How", "this module isn't normally required. 
Rather it's main purpose is", "= True, #hidden = True, ), ] list_fields = [\"id\",", "%s\" % (skill.quantity, skill_represent(skill.skill_id)) body = \"%s\\n%s\" % (item, body)", "editor in Newsfeed settings.cms.richtext = True # Uncomment to show", "\"name\", \"location_id\", ) s3db.configure(tablename, crud_form = crud_form, onvalidation = org_facility_onvalidation,", "= T(\"Requests\"), #description = \"Manage requests for supplies, assets, staff", "False return result s3.prep = custom_prep return attr settings.customise_hrm_job_title_controller =", "return elif no_import: # Nothing to do :) return #else:", "\\ S3AddResourceLink(c=\"pr\", f=\"person\", title=T(\"Create Person\"), tooltip=current.messages.AUTOCOMPLETE_HELP) #else: # # RHeader", "label for Facilities in Inventory Management settings.inv.facility_label = \"Facility\" #", "= \"Membership Management System\", # restricted = True, # module_type", "label = T(\"Network\"), fields = [(\"\", \"group_id\"), (\"\", \"status_id\"), ],", "table.enabled, limitby = (0, 1) ).first() if url_exists: # Either", "fields = hr_fields, ), #S3SQLInlineComponent( # \"image\", # name =", "= True # Enable certain fields just for specific Organisations", "be possible to disable safely (\"hrm\", Storage( name_nice = T(\"Contacts\"),", "= \"L4\" ), # @ToDo: GroupedCheckbox Widget or Hierarchical MultiSelectWidget", "= \"%s\\n%s\" % (item, body) # Lookup series_id stable =", "def customise_pr_person_controller(**attr): s3 = current.response.s3 # Custom prep standard_prep =", "in the menu )), (\"admin\", Storage( name_nice = T(\"Admin\"), #description", "Authentication settings # These settings should be changed _after_ the", "= (ctable.pe_id == r.record.pe_id) & \\ (ctable.contact_method == \"RSS\") &", "result s3.prep = custom_prep if current.auth.s3_logged_in(): # Allow components with", "settings.inv.stock_count = True # Uncomment to not track pack values", "= True # Uncomment this to request the Organisation when", "user is from same org/site as Contacts they create r.component.table.site_id.default", "the Site when a user registers #settings.auth.registration_requests_site = True #", "db.pr_person query = (mtable.group_id == group_id) & \\ (mtable.group_head ==", "for this URL url_exists = db(table.url == rss_url).select(table.id, table.channel_id, table.enabled,", "Open links in IFrames open a full page in a", "} # ----------------------------------------------------------------------------- # Organisations # # Enable the use", "= True, # The user-visible functionality of this module isn't", "to adjust filters in Newsfeed when clicking on locations instead", "callable(standard_prep): result = standard_prep(r) else: result = True if not", "Storage( name_nice = T(\"Content Management\"), #description = \"Content Management System\",", "to show the chairperson of a group \"\"\" if hasattr(row,", "Requester settings.req.requester_label = \"Site Contact\" # Filter Requester as being", "import json data = old_rss = json.loads(old_rss)[\"data\"] if data: #", "(\"event\", Storage( name_nice = T(\"Events\"), #description = \"Activate Events (e.g.", "# Tell the client to request per-feature markers s3db.configure(\"org_facility\", marker_fn=facility_marker_fn)", "org_group controllers) - runs after controller customisation - but runs", "== r.component_id).select(otable.pe_id, limitby=(0, 1) ).first() try: pe_id = org.pe_id except:", "# ----------------------------------------------------------------------------- # Inventory Management # Uncomment to 
customise the", "Build Title & Body from the Request details priority =", "= s3.postp def custom_postp(r, output): # Call standard postp if", "), S3LocationFilter(\"organisation_location.location_id\", label = T(\"Neighborhood\"), levels = (\"L3\", \"L4\"), #hidden", "\"Location\" # Uncomment to show the date when a Site", "get_vars.get(\"(organisation)\", None) if organisation_id: field = s3db.hrm_human_resource.organisation_id field.default = organisation_id", "prior to being able to login? settings.auth.registration_requires_approval = True #", "label = \"\", multiple = False, fields = hr_fields, ),", "input to list all.\"), #_class = \"filter-search\", ), S3OptionsFilter(\"group_team.org_group_id\", label", "list_fields, ) return result s3.prep = custom_prep return attr settings.customise_project_project_controller", "from s3 import IS_LOCATION_SELECTOR2, S3LocationSelectorWidget2 field = table.location_id field.label =", "None return result s3.prep = custom_prep # Custom postp standard_postp", "filter_widgets = [ S3TextFilter([\"name\", \"acronym\"], label = T(\"Name\"), _class =", "Do new users need to verify their email address? settings.auth.registration_requires_verification", "# Add RSS Channel _id = table.insert(name=name, enabled=True, url=rss_url) record", "limitby = (0, 1) ).first() no_import = current.request.post_vars.get(\"rss_no_import\", None) if", "current.auth.s3_logged_in(): # Allow components with components (such as org/group) to", "crud_form, ) elif r.component_name == \"pr_group\": list_fields = [#(T(\"Network\"), \"group_team.org_group_id\"),", "ensure active or not as appropriate # Name field is", "depends on Assets #(\"vehicle\", Storage( # name_nice = T(\"Vehicles\"), #", "old.enabled: s3db.msg_channel_disable(\"msg_rss_channel\", old.channel_id) return else: # Nothing to do :)", "Impact Assessments\", restricted = True, module_type = 5, )), (\"event\",", "Default rss_import = None mtable = s3db.org_group_membership mtable.group_id.widget = S3MultiSelectWidget(multiple=False)", "settings.pr.request_dob = False settings.pr.request_gender = False # Doesn't yet work", "rtable.requester_id, rtable.priority, rtable.date_required, rtable.purpose, rtable.comments, limitby=(0, 1) ).first() # Build", "= [\"name\", (T(\"Type\"), \"organisation_organisation_type.organisation_type_id\"), (T(\"Services\"), \"service.name\"), \"phone\", (T(\"Email\"), \"email.value\"), \"website\"", "\"%s\\n%s\" % (item, body) else: # Skills body = \"%s\\n%s\"", "if address: form_vars.name = address else: # We need a", "current.db(table.url == old_rss).select(table.channel_id, table.enabled, limitby = (0, 1) ).first() if", "T(\"Assets\"), #description = \"Recording and Assigning Assets\", restricted = True,", "Anonymous) can see this module in the default menu &", "the file # image_field = s3db.pr_image.image # image_field.requires = None", "if old and old.enabled: s3db.msg_channel_disable(\"msg_rss_channel\", old.channel_id) return else: # Nothing", "(\"errors\", Storage( name_nice = T(\"Ticket Viewer\"), #description = \"Needed for", "fields = [(\"\", \"org_group_id\")], # @ToDo: Make this optional? 
multiple", "standard_postp = s3.postp def custom_postp(r, output): # Call standard postp", "= current.s3db s3 = current.response.s3 # Tell the client to", "\"last_name\", S3SQLInlineComponent( \"human_resource\", name = \"human_resource\", label = \"\", multiple", "restricted = True, module_type = 9, # 8th item in", "% r.component_id) # Default rss_import = None else: ctable =", "import IS_IMAGE #image_field.requires = IS_IMAGE() #image_field.widget = None from s3", "= custom_prep return attr settings.customise_org_facility_controller = customise_org_facility_controller # ----------------------------------------------------------------------------- def", "channel (& associated parsers) s3db.msg_channel_disable(\"msg_rss_channel\", name_exists.channel_id) url_exists.update_record(name=name) if no_import: if", "Custom label for Organisations in HR module #settings.hrm.organisation_label = \"National", "for managing stock levels #settings.inv.direct_stock_edits = True # Uncomment to", "filterby = dict(field = \"name\", options=\"iCal\" ) ), S3SQLInlineComponent( \"document\",", "# Add Network Status to List Fields list_fields = s3db.get_config(\"org_organisation\",", "\"\"\" db = current.db s3db = current.s3db table = db.org_facility_type", "location, capacity and breakdown of victims in Shelters\", # restricted", "Contact one and link the URL one to this Contact", "= True # Uncomment this to use settings suitable for", "Title & Body from the Request details priority = rtable.priority.represent(row.priority)", "= \"phone2\", label = T(\"Phone2\"), multiple = False, fields =", "Do new users need to be approved by an administrator", "filter_widgets = filter_widgets, ) field = r.table.site_id # Don't assume", "= s3db.hrm_multi_skill_represent for skill in skills: item = \"%s %s\"", "True, # ), S3LocationFilter(\"location.location_id\", label = T(\"Location\"), levels = (\"L1\",", "= \"data\", label = T(\"Data\"), multiple = False, fields =", "fields = [(\"\", \"group_id\"), (\"\", \"status_id\"), ], ), S3SQLInlineComponent( \"address\",", "channel (& associated parsers) s3db.msg_channel_enable(\"msg_rss_channel\", url_exists.channel_id) return elif no_import: #", "#description = \"Ordering & Purchasing of Goods & Services\", #", ")), (\"project\", Storage( name_nice = T(\"Projects\"), #description = \"Tracking of", ") ), S3SQLInlineComponent( \"document\", name = \"data\", label = T(\"Data\"),", "10, # )), (\"req\", Storage( name_nice = T(\"Requests\"), #description =", "staff or other resources. 
Matches against Inventories where supplies are", "validate image without the file # image_field = s3db.pr_image.image #", "= current.db s3db = current.s3db table = db.org_facility_type ltable =", "NB This can also be over-ridden for specific contexts later", "Facilities Map @ToDo: Legend \"\"\" db = current.db s3db =", "f=\"person\", title=T(\"Create Person\"), tooltip=current.messages.AUTOCOMPLETE_HELP) #else: # # RHeader wants a", "r.component.table.site_id.default = None return result s3.prep = custom_prep # Custom", "= False, # fields = [(\"\", \"image\")], # filterby =", "1) ).first().location_id # Create Post ptable = s3db.cms_post _id =", "dict): if \"rheader\" in output: # Custom Tabs tabs =", "\"human_resource.organisation_id\"), \"first_name\", #\"middle_name\", \"last_name\", (T(\"Job Title\"), \"human_resource.job_title_id\"), (T(\"Office\"), \"human_resource.site_id\"), ]", "else: # Enable channel (& associated parsers) s3db.msg_channel_enable(\"msg_rss_channel\", name_exists.channel_id) return", "import S3LocationFilter, S3OptionsFilter, S3TextFilter, S3HierarchyFilter filter_widgets = [ S3TextFilter([\"name\", \"acronym\"],", "= form.vars rss_url = form_vars.rsscontact_i_value_edit_0 or \\ form_vars.rsscontact_i_value_edit_none if not", "@ToDo: Legend \"\"\" db = current.db s3db = current.s3db table", "name_nice = T(\"Person Registry\"), #description = \"Central point to record", "Resource Management # Uncomment to chage the label for 'Staff'", "lookup fields settings.org.site_autocomplete = True # Extra fields to search", "module_type = 10, # )), #(\"member\", Storage( # name_nice =", "Uncomment these to use US-style dates in English settings.L10n.date_format =", "name_nice = T(\"Budgeting Module\"), # #description = \"Allows a Budget", "settings.cms.show_titles = True # ----------------------------------------------------------------------------- # Inventory Management # Uncomment", "record) # Add source link url = \"%s%s\" % (settings.get_base_public_url(),", "= T(\"Location\"), fields = [(\"\", \"location_id\")], ), # Partner Orgs", "\"\", multiple = False, fields = hr_fields, ), #S3SQLInlineComponent( #", "# restricted = True, # module_type = 2, # )),", "if r.component_id: # Update form db = current.db otable =", "(form fails to submit) #settings.pr.select_existing = False settings.pr.show_emergency_contacts = False", "they create r.component.table.site_id.default = None return result s3.prep = custom_prep", "Custom PreP standard_prep = s3.prep def custom_prep(r): # Call standard", "form ctable = s3db.pr_contact query = (ctable.pe_id == r.record.pe_id) &", "= table.type.writable = False return result s3.prep = custom_prep return", "= \"Tracks the location, capacity and breakdown of victims in", "= T(\"Search\"), comment = T(\"You can search by by group", "= False # Doesn't yet work (form fails to submit)", "\"name\", \"acronym\", S3SQLInlineLink( \"organisation_type\", field = \"organisation_type_id\", label = T(\"Type\"),", "= \"table_inline\" settings.msg.parser = \"NYC\" # Uncomment to Hide the", "to all still) module_type = 10 )), (\"org\", Storage( name_nice", "to disable Sectors in projects settings.project.sectors = False # Multiple", "), S3OptionsFilter(\"organisation_id\", filter = True, header = \"\", hidden =", "& where\". 
Allows relief agencies to coordinate their activities', restricted", "households to register to receive compensation and distributions\", # restricted", "module_type = None # This item is not shown in", "= None, )), (\"msg\", Storage( name_nice = T(\"Messaging\"), #description =", "settings.ui.formstyle = \"bootstrap\" settings.ui.filter_formstyle = \"table_inline\" settings.msg.parser = \"NYC\" #", "table.description.widget = s3_comments_widget from gluon import Field table.chairperson = Field.Method(\"chairperson\",", "label for Sites settings.org.site_label = \"Facility\" #settings.org.site_label = \"Location\" #", "s3 import S3TextFilter, S3OptionsFilter, S3LocationFilter filter_widgets = [ S3TextFilter([\"person_id$first_name\", \"person_id$middle_name\",", "field.default = site_id field.readable = field.writable = False hr_fields.remove(\"site_id\") else:", "#hidden = True, ), S3OptionsFilter(\"service_organisation.service_id\", #label = T(\"Service\"), #hidden =", "Update form old_rss = form.record.sub_rsscontact import json data = old_rss", "\"\"\" Function to decide which Marker to use for Facilities", "= [(T(\"Network\"), \"group_team.org_group_id\"), \"name\", \"description\", \"meetings\", (T(\"Chairperson\"), \"chairperson\"), \"comments\", ]", "#settings.auth.registration_roles = { 0: [\"comms_dispatch\"]} #settings.auth.registration_link_user_to = {\"staff\":T(\"Staff\"), # #\"volunteer\":T(\"Volunteer\")", "s3 import S3SQLCustomForm, S3SQLInlineComponent if r.method != \"read\": from gluon.validators", "] s3db = current.s3db s3db.configure(\"hrm_human_resource\", filter_widgets = filter_widgets, ) field", "s3db.pr_image.image # image_field.requires = None if r.interactive or r.representation ==", "& imports s3db.add_components(\"pr_group\", org_group_team = \"group_id\", ) s3db.configure(\"pr_group\", # Redirect", "import IS_LOCATION_SELECTOR2, S3LocationSelectorWidget2 field = table.location_id field.label = \"\" #", "# Uncomment to disable the use of HR Skills #settings.hrm.use_skills", "(& associated parsers) s3db.msg_channel_enable(\"msg_rss_channel\", url_exists.channel_id) return else: # Update the", "client to request per-feature markers s3db.configure(\"org_facility\", marker_fn=facility_marker_fn) # Custom PreP", "this to use Milestones in project/task. 
settings.project.milestones = False #", "] s3db.configure(tablename, crud_form = crud_form, filter_widgets = filter_widgets, list_fields =", "Create form: Default rss_import = None else: # Component if", "f=\"group\", args=[\"[id]\", \"group_membership\"]), ) settings.customise_pr_group_resource = customise_pr_group_resource # ----------------------------------------------------------------------------- def", "Lookup series_id stable = s3db.cms_series try: series_id = db(stable.name ==", "dict(field = \"contact_method\", options = \"RSS\" ) ), S3SQLInlineComponent( \"document\",", "= T(\"Group Chairperson\") if r.component_name == \"group_membership\": from s3layouts import", "= \"\", hidden = True, ), S3LocationFilter(\"location_id\", label = T(\"Location\"),", "= True, module_type = 10 )), (\"assess\", Storage( name_nice =", "Administration\", restricted = True, module_type = None # No Menu", "= \"Human Resources Management\", restricted = True, module_type = 3,", ") ), S3SQLInlineComponent( \"contact\", name = \"twitter\", label = T(\"Twitter\"),", "control access to this module access = None, # All", "ptable.last_name, ptable.id, limitby=(0, 1)).first() if chair: # Only used in", "table.meetings.readable = table.meetings.writable = True # Increase size of widget", "against Inventories where supplies are requested.\", restricted = True, module_type", "] list_fields = [\"name\", (T(\"Type\"), \"organisation_organisation_type.organisation_type_id\"), (T(\"Services\"), \"service.name\"), \"phone\", (T(\"Email\"),", "- update Feed name url_exists.update_record(name=name) if no_import: if url_exists.enabled: #", "associated parsers) s3db.msg_channel_enable(\"msg_rss_channel\", name_exists.channel_id) return else: # Check if we", "locations instead of opening the profile page settings.cms.location_click_filters = True", "s3db.configure(tablename, crud_form = crud_form, filter_widgets = filter_widgets, list_fields = list_fields,", "Request Management and Asset Management\", restricted = True, module_type =", "RSS Channel _id = table.insert(name=name, enabled=True, url=rss_url) record = dict(id=_id)", "= None else: ctable = s3db.pr_contact query = (ctable.pe_id ==", "= db(stable.name == \"Request\").select(stable.id, cache=s3db.cache, limitby=(0, 1) ).first().id except: #", "T(\"Group Chairperson\") return result s3.prep = custom_prep # Custom postp", "field.widget = S3LocationSelectorWidget2(levels = (\"L2\",), points = True, polygons =", "by default settings.cms.filter_open = True # Uncomment to adjust filters", "hidden = True, ), S3OptionsFilter(\"training.course_id\", label = T(\"Training\"), hidden =", "# # Create a new Feed # pass # Add", "without an Organisation settings.hrm.org_required = False # Uncomment to show", "table.id) rows = db(query).select(table.name) types = [row.name for row in", "hidden = True, ), S3OptionsFilter(\"site_id\", hidden = True, ), S3OptionsFilter(\"training.course_id\",", "Site\" in types: marker = \"asset\" elif \"Residential Building\" in", "#image_field.requires = IS_IMAGE() #image_field.widget = None from s3 import S3SQLCustomForm,", "# #hidden = True, # ), S3LocationFilter(\"location.location_id\", label = T(\"Location\"),", "elif r.component_name == \"group_membership\": s3db.pr_group_membership.group_head.label = T(\"Group Chairperson\") return result", "url=$('#person_add').attr('href') url=url.split('?') var q=S3.queryString.parse(url[1]) q['(site)']=$(this).val() url=url[0]+'?'+S3.queryString.stringify(q) $('#person_add').attr('href',url)})''' 
current.response.s3.jquery_ready.append(script) settings.customise_req_req_resource =", "settings.org.site_last_contacted = True # Enable certain fields just for specific", "opening the profile page settings.cms.location_click_filters = True # Uncomment to", "crud_form = S3SQLCustomForm(S3SQLInlineComponent( \"site_facility_type\", label = T(\"Facility Type\"), fields =", "----------------------------------------------------------------------------- def facility_marker_fn(record): \"\"\" Function to decide which Marker to", "10, # )), #(\"member\", Storage( # name_nice = T(\"Members\"), #", "#hidden = True, # ), S3LocationFilter(\"location.location_id\", label = T(\"Location\"), levels", "if r.method in (None, \"create\", \"update\"): from s3 import IS_LOCATION_SELECTOR2,", "Purchasing of Goods & Services\", # restricted = True, #", "field.widget = None script = \\ '''$.filterOptionsS3({ 'trigger':'organisation_id', 'target':'site_id', 'lookupResource':'site',", "s3_sql_custom_fields = [\"first_name\", #\"middle_name\", \"last_name\", S3SQLInlineComponent( \"human_resource\", name = \"human_resource\",", "to disable safely (\"hrm\", Storage( name_nice = T(\"Contacts\"), #description =", "title=title, body=body, location_id=location_id, person_id=row.requester_id, ) record = dict(id=_id) s3db.update_super(ptable, record)", "] s3db.configure(\"pr_group\", list_fields = list_fields, ) elif r.component_name == \"organisation\":", "module_type = 10 # )), (\"asset\", Storage( name_nice = T(\"Assets\"),", "Storage( name_nice = T(\"Assets\"), #description = \"Recording and Assigning Assets\",", "list_fields = [(current.messages.ORGANISATION, \"human_resource.organisation_id\"), \"first_name\", #\"middle_name\", \"last_name\", (T(\"Job Title\"), \"human_resource.job_title_id\"),", "= \"WORK_PHONE\" ) ), S3SQLInlineComponent( \"contact\", name = \"email\", label", "in (None, \"create\", \"update\"): from s3 import IS_LOCATION_SELECTOR2, S3LocationSelectorWidget2 table", "Analysis\", restricted = True, module_type = 9, # 8th item", "Management\", restricted = True, module_type = 3, )), #(\"vol\", Storage(", "label = T(\"Status\"), # Not translateable #represent = \"%(name)s\", cols", "to search in Autocompletes & display in Representations settings.org.site_autocomplete_fields =", "= [(current.messages.ORGANISATION, \"human_resource.organisation_id\"), \"first_name\", #\"middle_name\", \"last_name\", (T(\"Job Title\"), \"human_resource.job_title_id\"), (T(\"Office\"),", "= [\"name\", \"mission\", \"website\", \"meetings\", ] s3db.configure(\"org_group\", list_fields = list_fields,", "in S3AddPersonWidget settings.pr.request_dob = False settings.pr.request_gender = False # Doesn't", "if not result: return False from s3 import S3Represent, S3TextFilter,", "a Profile page?\" organisation_id = get_vars.get(\"(organisation)\", None) if organisation_id: field", "= \"iCAL\", multiple = False, fields = [(\"\", \"url\")], filterby", "hidden = True, ), S3LocationFilter(\"location_id\", label = T(\"Location\"), levels =", "hrm_human_resource resource (in facility, human_resource, organisation & person controllers) -", "row.comments if row.type == 1: # Items ritable = s3db.req_req_item", "Fields from s3 import S3SQLCustomForm, S3SQLInlineComponent # We default this", "from s3 import s3_comments_widget table.description.widget = s3_comments_widget from gluon import", "# #description = \"Manage Vehicles\", # restricted = True, #", "We default this onvalidation table.name.notnull = False table.name.requires = 
None", "False # Uncomment this to disable Sectors in projects settings.project.sectors", "allow Staff & Volunteers to be registered without an Organisation", "= form.vars name = form_vars.get(\"name\", None) if name: return address", "to show Links in Newsfeed settings.cms.show_links = True # Uncomment", "= 3, ), #S3OptionsFilter(\"theme_project.theme_id\", # label = T(\"Theme\"), # #hidden", "restricted = True, # module_type = 10 # )), (\"asset\",", "== \"pr_group\": list_fields = [#(T(\"Network\"), \"group_team.org_group_id\"), \"name\", \"description\", \"meetings\", (T(\"Chairperson\"),", "name_nice = T(\"Contacts\"), #description = \"Human Resources Management\", restricted =", "# Uncomment to customise the label for Facilities in Inventory", "= False settings.req.item_quantities_writable = True settings.req.skill_quantities_writable = True settings.req.items_ask_purpose =", "CMS # Uncomment to use Bookmarks in Newsfeed settings.cms.bookmarks =", "Contact Name or URL if no_import: if name_exists.enabled: # Disable", "group \"\"\" if hasattr(row, \"pr_group\"): row = row.pr_group try: group_id", "\"group_person\", label = T(\"Network\"), link = False, fields = [(\"\",", "Contact # - update Feed name url_exists.update_record(name=name) if no_import: if", "False, #widget = \"hierarchy\", ), S3SQLInlineComponentMultiSelectWidget( # activate hierarchical org_service:", "values settings.inv.track_pack_values = False settings.inv.send_show_org = False # Types common", "custom_postp return attr settings.customise_pr_person_controller = customise_pr_person_controller # ----------------------------------------------------------------------------- # Groups", "\"\"\" Default the name to the Street Address \"\"\" form_vars", "= s3db.org_group_membership mtable.group_id.widget = S3MultiSelectWidget(multiple=False) mtable.status_id.widget = S3MultiSelectWidget(multiple=False, create=dict(c=\"org\", f=\"group_membership_status\",", "db(query).select(gtable.id, limitby=(0, 1)).first() if manhattan: field.default = manhattan.id table.mission.readable =", "Groups settings.org.groups = \"Network\" # Make Services Hierarchical settings.org.services_hierarchical =", "s3db.msg_channel_enable(\"msg_rss_channel\", url_exists.channel_id) return else: # Update the URL name_exists.update_record(url=rss_url) if", "from gluon.validators import IS_EMPTY_OR from s3 import IS_LOCATION_SELECTOR2, S3LocationSelectorWidget2 field", "# ----------------------------------------------------------------------------- def customise_org_group_controller(**attr): s3db = current.s3db s3 = current.response.s3", "form: Default rss_import = None else: # Component if r.component_id:", "#description = \"Support Requests\", # restricted = True, # module_type", "s3db.req_req_skill skills = db(rstable.req_id == req_id).select(rstable.skill_id, rstable.quantity) skill_represent = s3db.hrm_multi_skill_represent", "to show the date when a Site (Facilities-only for now)", "\"%s\\n%s\" % (item, body) # Lookup series_id stable = s3db.cms_series", "experience settings.hrm.staff_experience = False # Uncomment to disable the use", "url_exists: # We have 2 feeds: 1 for the Contact", "1: # Items ritable = s3db.req_req_item items = db(ritable.req_id ==", "social media, etc.\"), fields = [\"document_id\", \"name\", \"url\", \"comments\", ],", ": [], # \"vol_volunteer.active\" : [], # \"vol_volunteer_cluster.vol_cluster_type_id\" : [],", "handle Start & End in 1! 
S3DateFilter(\"start_date\", label = T(\"Start", "appropriate Resources (Human, Assets & Facilities).\", restricted = True, module_type", "Scenario templates) for allocation of appropriate Resources (Human, Assets &", "= [(T(\"Basic Details\"), None), (T(\"Contacts\"), \"human_resource\"), (T(\"Facilities\"), \"facility\"), (T(\"Projects\"), \"project\"),", "), ] # Need to re-do list_fields as get over_written", "utf-8 -*- try: # Python 2.7 from collections import OrderedDict", "customise_org_organisation_controller # ----------------------------------------------------------------------------- def customise_org_group_controller(**attr): s3db = current.s3db s3 =", "True, module_type = None, # Not displayed )), (\"inv\", Storage(", "= T(\"Administration\"), #description = \"Site Administration\", restricted = True, module_type", "#elif \"Shelter\" in types: # marker = \"shelter\" else: #", "True # ----------------------------------------------------------------------------- def facility_marker_fn(record): \"\"\" Function to decide which", "Management settings.req.req_type = [\"People\", \"Stock\"]#, \"Summary\"] settings.req.prompt_match = False #settings.req.use_commit", "#settings.auth.always_notify_approver = False # Uncomment this to request the Mobile", "import S3SQLCustomForm, S3SQLInlineComponent s3_sql_custom_fields = [\"first_name\", #\"middle_name\", \"last_name\", S3SQLInlineComponent( \"human_resource\",", "tablename): from gluon.html import DIV, INPUT from s3 import S3MultiSelectWidget,", ") field = r.table.site_id # Don't assume that user is", "= current.s3db from s3 import S3SQLCustomForm, S3SQLInlineComponent crud_form = S3SQLCustomForm(\"person_id\",", "Runs after crud_form completes - creates a cms_post in the", "from req/create form # Hide most Fields from s3 import", "from s3 import S3SQLCustomForm, S3SQLInlineComponent s3_sql_custom_fields = [\"first_name\", #\"middle_name\", \"last_name\",", "args=[channel_id, \"parse_rss\"]) # ----------------------------------------------------------------------------- # Human Resource Management # Uncomment", "Management settings.inv.facility_label = \"Facility\" # Uncomment if you need a", "normal Audit return True else: # Don't Audit non user-visible", "= form_vars.get(\"name\", None) if name: return address = form_vars.get(\"address\", None)", "current.response.s3 # Custom prep standard_prep = s3.prep def custom_prep(r): #", "assume that user is from same org/site as Contacts they", "person_id instead of created_by in Newsfeed settings.cms.person = \"person_id\" #", "Feed # pass # Add RSS Channel _id = table.insert(name=name,", "False from s3 import S3Represent, S3TextFilter, S3OptionsFilter, S3SQLCustomForm, S3SQLInlineComponent s3db", "Storage( # name_nice = T(\"Members\"), # #description = \"Membership Management", "url=url, ) # ----------------------------------------------------------------------------- def customise_req_req_resource(r, tablename): from s3layouts import", "\"last_name\", (T(\"Job Title\"), \"human_resource.job_title_id\"), (T(\"Office\"), \"human_resource.site_id\"), ] # Don't include", "s3 = current.response.s3 # Tell the client to request per-feature", "& SMS\", restricted = True, # The user-visible functionality of", "= True, ), S3OptionsFilter(\"site_id\", hidden = True, ), S3OptionsFilter(\"training.course_id\", label", "created_by/modified_by using Names not Emails settings.ui.auth_user_represent = \"name\" # Record", "menu )), (\"appadmin\", Storage( name_nice = T(\"Administration\"), #description = 
\"Site", "# name_nice = T(\"Shelters\"), # #description = \"Tracks the location,", "= \"EMAIL\")), ) crud_form = S3SQLCustomForm(*s3_sql_custom_fields) s3db.configure(r.tablename, crud_form = crud_form,", ") record = dict(id=_id) s3db.update_super(ptable, record) # Add source link", "settings.ui.auth_user_represent = \"name\" # Record Approval settings.auth.record_approval = True settings.auth.record_approval_required_for", "@ToDo: GroupedCheckbox Widget or Hierarchical MultiSelectWidget #cols = 5, ),", "= {\"linktable\": \"project_activity_type_project\", \"lkey\": \"project_id\", \"rkey\": \"activity_type_id\", }, ), #\"budget\",", "return else: # Nothing to do :) return # Check", "'trigger':'organisation_id', 'target':'site_id', 'lookupResource':'site', 'lookupURL':'/%s/org/sites_for_org/', 'optional':true })''' % r.application s3.jquery_ready.append(script) return", "# Enable certain fields just for specific Organisations # empty", "# No Menu )), (\"errors\", Storage( name_nice = T(\"Ticket Viewer\"),", "for the menu # )), (\"gis\", Storage( name_nice = T(\"Map\"),", "current.s3db s3db.configure(\"hrm_human_resource\", filter_widgets = filter_widgets, ) field = r.table.site_id #", "(item, body) else: # Skills body = \"%s\\n%s\" % (row.purpose,", "s3db.update_super(ptable, record) # Add source link url = \"%s%s\" %", "% (row.purpose, body) rstable = s3db.req_req_skill skills = db(rstable.req_id ==", "# module_type = 2, # )), (\"cms\", Storage( name_nice =", "Storage( name_nice = T(\"Inventory\"), #description = \"Receiving and Sending Items\",", "# Audit def audit_write(method, tablename, form, record, representation): if not", "and (r.interactive or r.representation == \"aadata\"): from s3 import S3SQLCustomForm,", "represents settings.hrm.show_organisation = True # Uncomment to disable Staff experience", "pass # Add RSS Channel _id = table.insert(name=name, enabled=True, url=rss_url)", "Project filter = {\"linktable\": \"project_activity_type_project\", \"lkey\": \"project_id\", \"rkey\": \"activity_type_id\", },", "if url_exists: # Either Contact has changed Name or this", "No Menu )), (\"sync\", Storage( name_nice = T(\"Synchronization\"), #description =", "== req_id).select(ritable.item_id, ritable.item_pack_id, ritable.quantity) item_represent = s3db.supply_item_represent pack_represent = s3db.supply_item_pack_represent", ") elif r.component_name == \"human_resource\": # Don't assume that user", "async = current.s3task.async async(\"msg_poll\", args=[\"msg_rss_channel\", channel_id]) async(\"msg_parse\", args=[channel_id, \"parse_rss\"]) #", "= True # Uncomment to show Tags in Newsfeed settings.cms.show_tags", "in Newsfeed settings.cms.show_titles = True # ----------------------------------------------------------------------------- # Inventory Management", "feed is being deleted, so we should disable it old_rss", "old_rss = json.loads(old_rss)[\"data\"] if data: # RSS feed is being", "\"Sends & Receives Alerts via Email & SMS\", restricted =", "appropriate # Name field is unique so rename old one", "dict(field = \"role\", options = \"2\" ) ), S3SQLInlineComponent( \"document\",", "= T(\"Phone2\"), multiple = False, fields = [(\"\", \"value\")], filterby", "record = dict(id=_id) s3db.update_super(table, record) # Enable channel_id = record[\"channel_id\"]", "rss_url).select(table.id, table.channel_id, table.enabled, limitby = (0, 1) ).first() if url_exists:", "also be over-ridden for specific contexts later # e.g. 
Activities", "old_rss = form.record.sub_rsscontact import json data = old_rss = json.loads(old_rss)[\"data\"]", "customise_org_facility_controller # ----------------------------------------------------------------------------- def customise_org_organisation_resource(r, tablename): from gluon.html import DIV,", "import s3_fullname T = current.T settings = current.deployment_settings \"\"\" Template", "Update the URL name_exists.update_record(url=rss_url) if no_import: if name_exists.enabled: # Disable", "This item is handled separately for the menu )), (\"appadmin\",", "Form #image_field = s3db.pr_image.image #from gluon.validators import IS_IMAGE #image_field.requires =", "= crud_form, filter_widgets = filter_widgets, list_fields = list_fields, ) s3db.pr_group_membership.group_head.label", "fields = [(\"\", \"value\")], filterby = dict(field = \"contact_method\", options", "options=[True] # ) # ), ] list_fields = [(current.messages.ORGANISATION, \"human_resource.organisation_id\"),", "S3SQLCustomForm(*s3_sql_custom_fields) s3db.configure(r.tablename, crud_form = crud_form, list_fields = list_fields, ) elif", "None # This item is not shown in the menu", "T(\"Facebook\"), multiple = False, fields = [(\"\", \"value\")], filterby =", "Asset Management\", restricted = True, module_type = None, # Not", ")), (\"gis\", Storage( name_nice = T(\"Map\"), #description = \"Situation Awareness", "S3MultiSelectWidget(multiple=False, create=dict(c=\"org\", f=\"group_membership_status\", label=str(T(\"Add New Status\")), parent=\"group_membership\", child=\"status_id\" )) crud_form", "or r.representation == \"aadata\": table = current.s3db.hrm_job_title table.organisation_id.readable = table.organisation_id.writable", "True, ), ] s3db = current.s3db s3db.configure(\"hrm_human_resource\", filter_widgets = filter_widgets,", "(mtable.group_id == group_id) & \\ (mtable.group_head == True) & \\", "from s3layouts import S3AddResourceLink current.s3db.req_req.site_id.comment = \\ S3AddResourceLink(c=\"org\", f=\"facility\", vars", "GeoNames username settings.gis.geonames_username = \"eden_nyc\" # Uncomment to show created_by/modified_by", "# Don't include prepop return False if tablename in (\"cms_post\",", "from s3 import S3MultiSelectWidget, S3SQLCustomForm, S3SQLInlineLink, S3SQLInlineComponent, S3SQLInlineComponentMultiSelectWidget s3db =", "= \"rss\", label = T(\"RSS\"), multiple = False, fields =", "= \"contact_method\", options = \"EMAIL\" ) ), \"website\", S3SQLInlineComponent( \"contact\",", "db(rtable.id == req_id).select(rtable.type, rtable.site_id, rtable.requester_id, rtable.priority, rtable.date_required, rtable.purpose, rtable.comments, limitby=(0,", "settings.cms.bookmarks = True # Uncomment to use have Filter form", "this Contact db = current.db name = form_vars.name table =", "else: result = True if not r.component and (r.interactive or", "= filter_widgets, list_fields = list_fields, ) return result s3.prep =", "\"Human Resources Management\", # restricted = True, # module_type =", "{\"staff\":T(\"Staff\"), # #\"volunteer\":T(\"Volunteer\") # } settings.auth.registration_link_user_to_default = \"staff\" settings.security.policy =", "\"url\", \"comments\", ], filterby = dict(field = \"name\") ), S3SQLInlineComponentCheckbox(", "), \"website\", S3SQLInlineComponent( \"contact\", comment = DIV(INPUT(_type=\"checkbox\", _name=\"rss_no_import\", value =", "\"\"\" s3db = current.s3db form_vars = form.vars rss_url = form_vars.rsscontact_i_value_edit_0", "row in rows] # Use Marker in preferential order 
if", "activate hierarchical org_service: #from s3 import S3LocationFilter, S3OptionsFilter, S3TextFilter, S3HierarchyFilter", "= \"FACEBOOK\" ) ), \"meetings\", \"comments\", postprocess = pr_contact_postprocess, )", "associated parsers) s3db.msg_channel_disable(\"msg_rss_channel\", url_exists.channel_id) return elif url_exists.enabled: # Nothing to", "projects settings.project.sectors = False # Multiple partner organizations settings.project.multiple_organisations =", "postp standard_postp = s3.postp def custom_postp(r, output): # Call standard", "False # Uncomment this to request the Mobile Phone when", "fields = [(\"\", \"url\")], filterby = dict(field = \"name\", options=\"Data\"", "= [row.name for row in rows] # Use Marker in", "settings.req.type_inv_label = \"Supplies\" # Uncomment to enable Summary 'Site Needs'", "label = T(\"Training\"), hidden = True, ), S3OptionsFilter(\"group_membership.group_id\", label =", "# ----------------------------------------------------------------------------- def pr_contact_postprocess(form): \"\"\" Import Organisation/Network RSS Feeds \"\"\"", "T(\"Name\"), _class = \"filter-search\", ), S3OptionsFilter(\"status_id\", label = T(\"Status\"), #", "Breadcrumbs\", restricted = False, module_type = None # No Menu", "label = T(\"Partner Organizations\"), fields = [\"organisation_id\", \"comments\", # NB", "if current.auth.is_logged_in(): MOBILE = settings.get_ui_label_mobile_phone() EMAIL = T(\"Email\") list_fields +=", "\"Request\").select(stable.id, cache=s3db.cache, limitby=(0, 1) ).first().id except: # Prepop hasn't been", "False, fields = [(\"\", \"value\")], filterby = dict(field = \"contact_method\",", "so HTML is OK return A(s3_fullname(chair), _href=URL(c=\"hrm\", f=\"person\", args=chair.id)) else:", "#description = \"Synchronization\", restricted = True, access = \"|1|\", #", ")), (\"sync\", Storage( name_nice = T(\"Synchronization\"), #description = \"Synchronization\", restricted", "SMS\", restricted = True, # The user-visible functionality of this", "be open by default settings.cms.filter_open = True # Uncomment to", "to use organisation_id instead of created_by in Newsfeed settings.cms.organisation =", "name_nice = T(\"Support\"), # #description = \"Support Requests\", # restricted", "# Filter Activity Type by Project filter = {\"linktable\": \"project_activity_type_project\",", "\"url\")], filterby = dict(field = \"name\", options=\"Data\" ) ), S3SQLInlineComponent(", "# Build Title & Body from the Request details priority", "stock levels #settings.inv.direct_stock_edits = True # Uncomment to call Stock", "is not shown in the menu )), (\"admin\", Storage( name_nice", "current.s3db s3db.org_group_team.org_group_id.represent = S3Represent(lookup=\"org_group\", show_link=True) crud_form = S3SQLCustomForm(\"name\", \"description\", S3SQLInlineComponent(\"group_team\",", "tab settings.ui.iframe_opens_full = True settings.ui.label_attachments = \"Media\" settings.ui.update_label = \"Edit\"", "This is labelled 'Role' in DRRPP ], filterby = dict(field", ")), (\"inv\", Storage( name_nice = T(\"Inventory\"), #description = \"Receiving and", "import S3SQLCustomForm, S3SQLInlineComponent if r.method != \"read\": from gluon.validators import", "onvalidation table.name.notnull = False table.name.requires = None crud_form = S3SQLCustomForm(S3SQLInlineComponent(", "to List Fields list_fields = s3db.get_config(\"org_organisation\", \"list_fields\") list_fields.insert(1, \"group_membership.status_id\") return", "contexts later # e.g. 
Activities filtered to those of parent", "just certain countries # NB This can also be over-ridden", "s3 import S3SQLCustomForm, S3SQLInlineComponent crud_form = S3SQLCustomForm(\"person_id\", \"organisation_id\", \"site_id\", S3SQLInlineComponent(", "table.meetings.readable = table.meetings.writable = True if r.id: # Update form", "function_name=\"parse_rss\", enabled=True) s3db.msg_parser_enable(_id) # Check Now async = current.s3task.async async(\"msg_poll\",", "] # Need to re-do list_fields as get over_written by", "Feed\")), name = \"rss\", label = T(\"RSS\"), multiple = False,", "= S3MultiSelectWidget(multiple=False) mtable.status_id.widget = S3MultiSelectWidget(multiple=False, create=dict(c=\"org\", f=\"group_membership_status\", label=str(T(\"Add New Status\")),", "Sectors in projects settings.project.sectors = False # Multiple partner organizations", "a full page in a new tab settings.ui.iframe_opens_full = True", "custom_postp(r, output): # Call standard postp if callable(standard_postp): output =", "True, ), S3OptionsFilter(\"group_person.group_id\", label = T(\"Network\"), #filter = True, #header", "True, ), S3OptionsFilter(\"training.course_id\", label = T(\"Training\"), hidden = True, ),", "# Enable channel (& associated parsers) s3db.msg_channel_enable(\"msg_rss_channel\", url_exists.channel_id) return elif", "= \"residence\" #elif \"Shelter\" in types: # marker = \"shelter\"", "new users need to be approved by an administrator prior", "users be allowed to register themselves? settings.security.self_registration = \"index\" #", "marker = \"hospital\" elif \"Food\" in types: marker = \"food\"", "see this module in the default menu (access to controller", "\"org_facility\", \"org_organisation\", \"req_req\", ): # Perform normal Audit return True", "#settings.gis.check_within_parent_boundaries = False # GeoNames username settings.gis.geonames_username = \"eden_nyc\" #", "filter_widgets & imports s3db.add_components(\"pr_group\", org_group_team = \"group_id\", ) s3db.configure(\"pr_group\", #", "# )), (\"asset\", Storage( name_nice = T(\"Assets\"), #description = \"Recording", "), S3SQLInlineComponent( \"contact\", name = \"twitter\", label = T(\"Twitter\"), multiple", "% r.application s3.jquery_ready.append(script) return result s3.prep = custom_prep return attr", "), # Partner Orgs S3SQLInlineComponent( \"organisation\", name = \"partner\", label", "A(s3_fullname(chair), _href=URL(c=\"hrm\", f=\"person\", args=chair.id)) else: return current.messages[\"NONE\"] # ----------------------------------------------------------------------------- def", "= [\"id\", \"name\", \"code\", \"organisation_id\", \"start_date\", \"end_date\", (T(\"Locations\"), \"location.location_id\"), ]", "feed is associated with # another Contact # - update", "= True, ), \"name\", \"location_id\", ) s3db.configure(tablename, crud_form = crud_form,", "group has been created create_next = URL(c=\"hrm\", f=\"group\", args=[\"[id]\", \"group_membership\"]),", "settings.req.requester_optional = True settings.req.date_writable = False settings.req.item_quantities_writable = True settings.req.skill_quantities_writable", "10, )), # Vehicle depends on Assets #(\"vehicle\", Storage( #", "ImageCrop widget doesn't currently work within an Inline Form #image_field", "table.url, limitby = (0, 1) ).first() no_import = current.request.post_vars.get(\"rss_no_import\", None)", "= True, ), # @ToDo: Widget to handle Start &", "= db.pr_person query = (mtable.group_id == group_id) & \\ (mtable.group_head", "and Assigning 
Assets\", restricted = True, module_type = 10, )),", "Person\"), tooltip=current.messages.AUTOCOMPLETE_HELP) #else: # # RHeader wants a simplified version,", "Phone when a user registers settings.auth.registration_requests_mobile_phone = True # Uncomment", "def audit_write(method, tablename, form, record, representation): if not current.auth.user: #", "# Call standard prep if callable(standard_prep): result = standard_prep(r) if", "profile page settings.cms.location_click_filters = True # Uncomment to use organisation_id", "from the Request details priority = rtable.priority.represent(row.priority) date_required = row.date_required", "or r.representation == \"aadata\"): from s3 import S3SQLCustomForm, S3SQLInlineComponent, S3SQLInlineComponentCheckbox", "= \"Site Administration\", restricted = True, access = \"|1|\", #", "current.s3db #if r.method == \"validate\": # # Can't validate image", "to use for Facilities Map @ToDo: Legend \"\"\" db =", "\"mission\", \"website\", \"meetings\", ] s3db.configure(\"org_group\", list_fields = list_fields, ) if", "Legend \"\"\" db = current.db s3db = current.s3db table =", "= True, header = \"\", hidden = True, ), ]", "have a channel for this URL url_exists = db(table.url ==", "mtable.status_id.widget = S3MultiSelectWidget(multiple=False, create=dict(c=\"org\", f=\"group_membership_status\", label=str(T(\"Add New Status\")), parent=\"group_membership\", child=\"status_id\"", "\"name\" # Record Approval settings.auth.record_approval = True settings.auth.record_approval_required_for = (\"org_organisation\",)", "standard_prep(r) else: result = True if not r.component and (r.interactive", "handled separately for the menu )), (\"appadmin\", Storage( name_nice =", "ptable = db.pr_person query = (mtable.group_id == group_id) & \\", "s3db.pr_contact query = (ctable.pe_id == pe_id) & \\ (ctable.contact_method ==", "link = False, fields = [(\"\", \"group_id\")], multiple = False,", "even if the user is automatically approved #settings.auth.always_notify_approver = False", "s3.prep = custom_prep return attr settings.customise_hrm_human_resource_controller = customise_hrm_human_resource_controller # -----------------------------------------------------------------------------", "result = standard_prep(r) else: result = True if r.interactive: if", "= (0, 1) ).first() if url_exists: # We have 2", "crud_form = S3SQLCustomForm( \"name\", \"acronym\", S3SQLInlineLink( \"organisation_type\", field = \"organisation_type_id\",", "name to the Street Address \"\"\" form_vars = form.vars name", "r.method in (None, \"create\", \"update\"): from s3 import IS_LOCATION_SELECTOR2, S3LocationSelectorWidget2", "# name_nice = T(\"Disaster Victim Registry\"), # #description = \"Allow", "T(\"Name\"), _class = \"filter-search\", ), S3OptionsFilter(\"group_membership.group_id\", label = T(\"Network\"), represent", "user, even if the user is automatically approved #settings.auth.always_notify_approver =", "stable = s3db.cms_series try: series_id = db(stable.name == \"Request\").select(stable.id, cache=s3db.cache,", "[\"first_name\", #\"middle_name\", \"last_name\", S3SQLInlineComponent( \"human_resource\", name = \"human_resource\", label =", "group_id) & \\ (mtable.group_head == True) & \\ (mtable.person_id ==", "restricted = True, module_type = 5, )), #(\"cr\", Storage( #", "= T(\"Disaster Victim Registry\"), # #description = \"Allow affected individuals", "# Enable channel (& associated parsers) s3db.msg_channel_enable(\"msg_rss_channel\", name_exists.channel_id) return else:", "= 
\"post_organisation.organisation_id\" # Uncomment to use org_group_id in Newsfeed settings.cms.organisation_group", "Inventory Management # Uncomment to customise the label for Facilities", "has been created create_next = URL(c=\"hrm\", f=\"group\", args=[\"[id]\", \"group_membership\"]), )", "Field table.chairperson = Field.Method(\"chairperson\", chairperson) # Format for filter_widgets &", "Menu )), (\"errors\", Storage( name_nice = T(\"Ticket Viewer\"), #description =", "wants a simplified version, but don't want inconsistent across tabs", "label = T(\"Email\"), multiple = False, fields = [(\"\", \"value\")],", "System\", # restricted = True, # module_type = 10, #", "False, # module_type = 10, # )), #(\"member\", Storage( #", "None) if name_exists: if name_exists.url == rss_url: # No change", "settings.hrm.staff_label = \"Contacts\" # Uncomment to allow Staff & Volunteers", "= DIV(INPUT(_type=\"checkbox\", _name=\"rss_no_import\", value = rss_import, ), T(\"Don't Import Feed\")),", "Site when a user registers #settings.auth.registration_requests_site = True # Roles", "over_written by hrm_group_controller() list_fields = [(T(\"Network\"), \"group_team.org_group_id\"), \"name\", \"description\", \"meetings\",", "# Create form: Default rss_import = None mtable = s3db.org_group_membership", "order to secure the deployment # Should users be allowed", "details priority = rtable.priority.represent(row.priority) date_required = row.date_required if date_required: date", "# ----------------------------------------------------------------------------- def customise_pr_group_controller(**attr): s3 = current.response.s3 # Custom prep", "if r.tablename == \"org_organisation\": if r.id: # Update form ctable", "= False table.type.readable = table.type.writable = False return result s3.prep", "# Enable this to have Open links in IFrames open", "def custom_prep(r): # Call standard prep if callable(standard_prep): result =", "# module_type = 10, # )), # @ToDo: Rewrite in", "toolbar settings.L10n.display_toolbar = False # Default timezone for users settings.L10n.utc_offset", "----------------------------------------------------------------------------- def pr_contact_postprocess(form): \"\"\" Import Organisation/Network RSS Feeds \"\"\" s3db", "High marker = \"%s_red\" % marker elif reqs == 2:", "== group_id) & \\ (mtable.group_head == True) & \\ (mtable.person_id", "s3db.pr_group_membership.group_head.label = T(\"Group Chairperson\") return result s3.prep = custom_prep #", "'lookupResource':'site', 'lookupURL':'/%s/org/sites_for_org/', 'optional':true })''' % r.application s3.jquery_ready.append(script) return result s3.prep", "show_postcode=True, ) elif r.component_name == \"human_resource\": # Don't assume that", "(T(\"Office\"), \"human_resource.site_id\"), ] # Don't include Email/Phone for unauthenticated users", "change to either Contact Name or URL if no_import: if", "table = s3db.pr_group field = table.group_type field.default = 3 #", "characters)\") table.code.max_length = 100 table.comments.label = T(\"How people can help\")", "), S3OptionsFilter(\"group_membership.group_id\", label = T(\"Team\"), filter = True, header =", "Create form: Default rss_import = None mtable = s3db.org_group_membership mtable.group_id.widget", "of HR Description settings.hrm.use_description = False # Change the label", "settings.base.system_name_short = T(\"NYC Prepared\") # Theme (folder to use for", "settings.pr.request_gender = False # Doesn't yet work (form fails to", "= 5 # Controller, Function & Table ACLs # 
Enable", "#2: T(\"Sale\"), #3: T(\"Reject\"), #4: T(\"Surplus\") } # ----------------------------------------------------------------------------- #", "r.method == \"validate\": # # Can't validate image without the", "return current.messages[\"NONE\"] db = current.db mtable = current.s3db.pr_group_membership ptable =", "Selector to just certain countries # NB This can also", "IS_LOCATION_SELECTOR2(levels=levels) field.widget = S3LocationSelectorWidget2(levels=levels, hide_lx=False, reverse_lx=True, show_address=True, show_postcode=True, ) elif", "return result s3.prep = custom_prep return attr settings.customise_project_project_controller = customise_project_project_controller", "crud_form, filter_widgets = filter_widgets, list_fields = list_fields, ) return result", "\"\", hidden = True, ), S3OptionsFilter(\"group_person.group_id\", label = T(\"Network\"), #filter", "custom_prep return attr settings.customise_hrm_human_resource_controller = customise_hrm_human_resource_controller # ----------------------------------------------------------------------------- def customise_hrm_human_resource_resource(r,", "= T(\"Data\"), multiple = False, fields = [(\"\", \"url\")], filterby", "use person_id instead of created_by in Newsfeed settings.cms.person = \"person_id\"", "Sites #settings.org.site_inv_req_tabs = True # ----------------------------------------------------------------------------- def facility_marker_fn(record): \"\"\" Function", "so we should disable it old_rss = data[0][\"value\"][\"value\"] table =", "settings.ui.label_attachments = \"Media\" settings.ui.update_label = \"Edit\" # Uncomment to disable", "= \"Groups\" # Custom label for Organisations in HR module", "----------------------------------------------------------------------------- def customise_hrm_human_resource_resource(r, tablename): \"\"\" Customise hrm_human_resource resource (in facility,", "settings.customise_org_group_controller = customise_org_group_controller # ----------------------------------------------------------------------------- # Persons # Uncomment to", "= T(\"Admin\"), #description = \"Site Administration\", restricted = True, access", "def facility_marker_fn(record): \"\"\" Function to decide which Marker to use", "if name: return address = form_vars.get(\"address\", None) if address: form_vars.name", "Channel _id = table.insert(name=name, enabled=True, url=rss_url) record = dict(id=_id) s3db.update_super(table,", "= [\"People\", \"Stock\"]#, \"Summary\"] settings.req.prompt_match = False #settings.req.use_commit = False", "current.response.s3 # Tell the client to request per-feature markers s3db.configure(\"org_facility\",", "Storage( name_nice = T(\"Assessments\"), #description = \"Rapid Assessments & Flexible", "Gets replaced by widget #field.requires = IS_LOCATION_SELECTOR2(levels = (\"L2\",)) field.requires", "options = \"L4\" ), # @ToDo: GroupedCheckbox Widget or Hierarchical", "is labelled 'Role' in DRRPP ], filterby = dict(field =", "s3db.configure(\"hrm_human_resource\", filter_widgets = filter_widgets, ) field = r.table.site_id # Don't", "Now async = current.s3task.async async(\"msg_poll\", args=[\"msg_rss_channel\", channel_id]) async(\"msg_parse\", args=[channel_id, \"parse_rss\"])", "= field.writable = False hr_fields.remove(\"site_id\") else: s3db.hrm_human_resource.site_id.default = None #", "settings.msg.parser = \"NYC\" # Uncomment to Hide the language toolbar", "= \"project_project\" table.code.label = T(\"Project blurb (max. 
100 characters)\") table.code.max_length", "= T(\"Building Assessments\"), # #description = \"Building Safety Assessments\", #", "Registry\"), #description = \"Central point to record details on People\",", "\"data\", label = T(\"Data\"), multiple = False, fields = [(\"\",", "Storage( name_nice = T(\"Person Registry\"), #description = \"Central point to", "cols = 3, # Filter Activity Type by Project filter", "name = form_vars.name table = s3db.msg_rss_channel name_exists = db(table.name ==", "== rss_url: # No change to either Contact Name or", "\"comments\", ) filter_widgets = [ S3TextFilter([\"name\", \"description\", \"comments\", \"group_team.org_group_id$name\", ],", "\"service_id\", # activate hierarchical org_service: #leafonly = False, #widget =", "Summary 'Site Needs' tab for Offices/Facilities settings.req.summary = True #", "s3db = current.s3db #if r.method == \"validate\": # # Can't", "\"warehouse\" elif \"Medical Clinic\" in types: marker = \"hospital\" elif", "#description = \"Activate Events (e.g. from Scenario templates) for allocation", "table.comments.label = T(\"How people can help\") script = '''$('#project_project_code').attr('maxlength','100')''' s3.jquery_ready.append(script)", "org_facility_onvalidation(form): \"\"\" Default the name to the Street Address \"\"\"", "= \"TWITTER\" ) ), S3SQLInlineComponent( \"contact\", name = \"facebook\", label", "by hrm_group_controller() list_fields = [(T(\"Network\"), \"group_team.org_group_id\"), \"name\", \"description\", \"meetings\", (T(\"Chairperson\"),", "#S3SQLInlineComponent( # \"image\", # name = \"image\", # label =", "label = T(\"Neighborhoods Served\"), field = \"location_id\", filterby = dict(field", "\"contact\", name = \"phone\", label = T(\"Phone\"), multiple = False,", "= T(\"Facebook\"), multiple = False, fields = [(\"\", \"value\")], filterby", "\"contact_method\", options = \"FACEBOOK\" ) ), \"meetings\", \"comments\", postprocess =", "the menu )), (\"pr\", Storage( name_nice = T(\"Person Registry\"), #description", "Management and Asset Management\", restricted = True, module_type = None,", "= \"%s %s %s\" % (item.quantity, pack_represent(item.item_pack_id), item_represent(item.item_id)) body =", "%s not found: cannot set rss_import correctly\" % r.component_id) #", "= \"human_resource\", label = \"\", multiple = False, fields =", "), #\"budget\", #\"currency\", \"comments\", ) from s3 import S3TextFilter, S3OptionsFilter,", "= S3SQLCustomForm(*s3_sql_custom_fields) s3db.configure(r.tablename, crud_form = crud_form, list_fields = list_fields, )", "= [(\"\", \"value\")], filterby = dict(field = \"contact_method\", options =", "-*- coding: utf-8 -*- try: # Python 2.7 from collections", "----------------------------------------------------------------------------- def customise_pr_group_resource(r, tablename): \"\"\" Customise pr_group resource (in group", "(in group & org_group controllers) - runs after controller customisation", "disable the use of HR Skills #settings.hrm.use_skills = False #", "== False) rss = db(query).select(ctable.poll, limitby=(0, 1) ).first() if rss", "by group name, description or comments and by network name.", "= True, ) # Default location to Manhattan db =", "True # ----------------------------------------------------------------------------- def req_req_postprocess(form): \"\"\" Runs after crud_form completes", "# ----------------------------------------------------------------------------- def customise_hrm_human_resource_controller(**attr): s3 = current.response.s3 # Custom prep", 
"types: # marker = \"shelter\" else: # Unknown marker =", "but runs before prep \"\"\" s3db = current.s3db from s3", "= \"Needed for Breadcrumbs\", restricted = False, module_type = None", "if row.type == 1: # Items ritable = s3db.req_req_item items", "it old_rss = data[0][\"value\"][\"value\"] table = s3db.msg_rss_channel old = current.db(table.url", "None # Use a hierarchical dropdown instead of AC field.widget", "via Email & SMS\", restricted = True, # The user-visible", "s3layouts import S3AddResourceLink current.s3db.req_req.site_id.comment = \\ S3AddResourceLink(c=\"org\", f=\"facility\", vars =", "#settings.req.use_req_number = False # Label for Requester settings.req.requester_label = \"Site", "most Fields from s3 import S3SQLCustomForm, S3SQLInlineComponent # We default", "the 1st (admin) user is # registered in order to", "settings.org.services_hierarchical = True # Set the label for Sites settings.org.site_label", "\"partner\", label = T(\"Partner Organizations\"), fields = [\"organisation_id\", \"comments\", #", ") from s3 import S3LocationFilter, S3OptionsFilter, S3TextFilter # activate hierarchical", "reports\", restricted = True, module_type = None, )), (\"msg\", Storage(", "Uncomment to disable the use of HR Description settings.hrm.use_description =", "= [(\"\", \"org_group_id\")], # @ToDo: Make this optional? multiple =", "breakdown of victims in Shelters\", # restricted = True, #", "# Change the label of \"Teams\" to \"Groups\" settings.hrm.teams =", "= T(\"Facility Type\"), fields = [(\"\", \"facility_type_id\")], multiple = False,", "url_exists.enabled: # Nothing to do :) return else: # Enable", ":) return # Check if we already have a channel", "(T(\"Job Title\"), \"human_resource.job_title_id\"), (T(\"Office\"), \"human_resource.site_id\"), ] # Don't include Email/Phone", "and Tasks\", restricted = True, module_type = 10 )), (\"assess\",", "T(\"Name\") table.description.label = T(\"Description\") table.meetings.readable = table.meetings.writable = True #", "\"post_organisation.organisation_id\" # Uncomment to use org_group_id in Newsfeed settings.cms.organisation_group =", "= T(\"Procurement\"), # #description = \"Ordering & Purchasing of Goods", "settings.req.items_ask_purpose = False #settings.req.use_req_number = False # Label for Requester", "to hide fields in S3AddPersonWidget settings.pr.request_dob = False settings.pr.request_gender =", "supplies, assets, staff or other resources. 
Matches against Inventories where", "& display in Representations settings.org.site_autocomplete_fields = (\"organisation_id$name\", \"location_id$addr_street\", ) #", "Update form ctable = s3db.pr_contact query = (ctable.pe_id == r.record.pe_id)", "Education settings.hrm.use_education = False # Uncomment to disable the use", "if r.component_name == \"facility\": if r.method in (None, \"create\", \"update\"):", "crud_form = crud_form, list_fields = list_fields, ) elif r.component_name ==", "\\ S3AddResourceLink(c=\"org\", f=\"facility\", vars = dict(child=\"site_id\"), title=T(\"Create Facility\"), tooltip=current.messages.AUTOCOMPLETE_HELP) current.response.s3.req_req_postprocess", "), S3SQLInlineComponentMultiSelectWidget( \"location\", label = T(\"Neighborhoods Served\"), field = \"location_id\",", "= table.insert(channel_id=channel_id, function_name=\"parse_rss\", enabled=True) s3db.msg_parser_enable(_id) # Check Now async =", "Extra fields to search in Autocompletes & display in Representations", "Organisation settings.hrm.org_required = False # Uncomment to show the Organisation", "# Uncomment to use organisation_id instead of created_by in Newsfeed", "limitby = (0, 1) ).first() if old and old.enabled: s3db.msg_channel_disable(\"msg_rss_channel\",", "non user-visible resources return False settings.security.audit_write = audit_write # -----------------------------------------------------------------------------", "# Controller, Function & Table ACLs # Enable this to", "name_exists.enabled: # Disable channel (& associated parsers) s3db.msg_channel_disable(\"msg_rss_channel\", name_exists.channel_id) return", "header = \"\", hidden = True, ), ] s3db =", "to allow Staff & Volunteers to be registered without an", "= table.organisation_id.writable = False table.type.readable = table.type.writable = False return", "org_service: #S3SQLInlineLink( \"service\", label = T(\"Services\"), field = \"service_id\", #", "0: [\"comms_dispatch\"]} #settings.auth.registration_link_user_to = {\"staff\":T(\"Staff\"), # #\"volunteer\":T(\"Volunteer\") # } settings.auth.registration_link_user_to_default", "is automatically approved #settings.auth.always_notify_approver = False # Uncomment this to", "if current.auth.s3_logged_in(): # Allow components with components (such as org/group)", "item is handled separately for the menu # )), (\"gis\",", "= current.T settings = current.deployment_settings \"\"\" Template settings for NYC", "form.vars rss_url = form_vars.rsscontact_i_value_edit_0 or \\ form_vars.rsscontact_i_value_edit_none if not rss_url:", "s3db = current.s3db if r.tablename == \"org_organisation\": if r.id: #", "----------------------------------------------------------------------------- def customise_org_organisation_controller(**attr): s3db = current.s3db s3 = current.response.s3 #", "), \"comments\", postprocess = pr_contact_postprocess, ) from s3 import S3LocationFilter,", "= s3db.msg_rss_channel old = current.db(table.url == old_rss).select(table.channel_id, table.enabled, limitby =", "[(\"\", \"value\")], filterby = dict(field = \"contact_method\", options = \"WORK_PHONE\"", "no_import: if url_exists.enabled: # Disable channel (& associated parsers) s3db.msg_channel_disable(\"msg_rss_channel\",", "tested settings.ui.datatables_responsive = False # PDF to Letter settings.base.paper_size =", "else: # Create form: Default rss_import = None else: #", "= T(\"Contacts\"), #description = \"Human Resources Management\", restricted = True,", "] output[\"rheader\"] = s3db.org_rheader(r, tabs=tabs) 
return output s3.postp = custom_postp", "= \"Situation Awareness & Geospatial Analysis\", restricted = True, module_type", "\"\"\" Import Organisation/Network RSS Feeds \"\"\" s3db = current.s3db form_vars", "T(\"United States Dollars\"), } settings.L10n.languages = OrderedDict([ (\"en\", \"English\"), (\"es\",", "projects (called 'blurb' in NYC) settings.project.codes = True # Uncomment", "filterby = dict(field = \"contact_method\", options = \"EMAIL\")), ) crud_form", "label for 'Staff' settings.hrm.staff_label = \"Contacts\" # Uncomment to allow", "= dict(field = \"contact_method\", options = \"EMAIL\" ) ), \"website\",", "limitby=(0, 1) ).first() return marker # ----------------------------------------------------------------------------- def org_facility_onvalidation(form): \"\"\"", "\"Shelter\" in types: # marker = \"shelter\" else: # Unknown", "None else: # Component if r.component_id: # Update form db", "resources return False settings.security.audit_write = audit_write # ----------------------------------------------------------------------------- # CMS", "False, # Use ACLs to control access to this module", "(mtable.group_head == True) & \\ (mtable.person_id == ptable.id) chair =", "S3SQLCustomForm, S3SQLInlineComponent crud_form = S3SQLCustomForm(\"person_id\", \"organisation_id\", \"site_id\", S3SQLInlineComponent( \"group_person\", label", "do :) return #else: # # Create a new Feed", "old = current.db(table.url == old_rss).select(table.channel_id, table.enabled, limitby = (0, 1)", "menu (access to controller is possible to all still) module_type", "# Uncomment to use Bookmarks in Newsfeed settings.cms.bookmarks = True", ")), (\"msg\", Storage( name_nice = T(\"Messaging\"), #description = \"Sends &", "\"image\")], # filterby = dict(field = \"profile\", # options=[True] #", "item = \"%s %s %s\" % (item.quantity, pack_represent(item.item_pack_id), item_represent(item.item_id)) body", "be registered without an email address settings.hrm.email_required = False #", "name = form_vars.get(\"name\", None) if name: return address = form_vars.get(\"address\",", "controller is possible to all still) module_type = 10 )),", "module_type = None # No Menu )), (\"sync\", Storage( name_nice", "# )), #(\"member\", Storage( # name_nice = T(\"Members\"), # #description", "enter, and manage surveys.\", restricted = True, module_type = 5,", "see this module in the default menu & access the", "the label for Facilities in Inventory Management settings.inv.facility_label = \"Facility\"", "= \"contact_method\", options = \"SMS\")), ) s3_sql_custom_fields.insert(3, S3SQLInlineComponent( \"contact\", name", "r.representation == \"aadata\": if not r.component: hr_fields = [\"organisation_id\", \"job_title_id\",", "address else: # We need a default form_vars.name = current.db.org_facility.location_id.represent(form_vars.location_id)", "# All modules below here should be possible to disable", "= current.db gtable = db.gis_location query = (gtable.name == \"New", "Street Address \"\"\" form_vars = form.vars name = form_vars.get(\"name\", None)", "to use Bookmarks in Newsfeed settings.cms.bookmarks = True # Uncomment", "manhattan: field.default = manhattan.id table.mission.readable = table.mission.writable = True table.meetings.readable", "#widget = \"hierarchy\", ), S3SQLInlineComponent( \"group_membership\", label = T(\"Network\"), fields", "Bookmarks in Newsfeed settings.cms.bookmarks = True # Uncomment to use", "URL url_exists = db(table.url == rss_url).select(table.id, table.channel_id, table.enabled, 
limitby =", "# Custom PreP standard_prep = s3.prep def custom_prep(r): # Call", "import Storage from s3 import s3_fullname T = current.T settings", "# Format for filter_widgets & imports s3db.add_components(\"pr_group\", org_group_team = \"group_id\",", "#description = \"Human Resources Management\", restricted = True, module_type =", "True, ) # Default location to Manhattan db = current.db", "FS(\"site_facility_type.facility_type_id$name\") != \"Private Residence\" if r.interactive: tablename = \"org_facility\" table", "[(\"\", \"group_id\")], multiple = False, ), \"job_title_id\", \"start_date\", ) list_fields", "# #description = \"Building Safety Assessments\", # restricted = True,", "if not result: return False if r.method not in (\"read\",", "= T(\"Events\"), #description = \"Activate Events (e.g. from Scenario templates)", "settings.ui.label_postcode = \"ZIP Code\" # Uncomment to disable responsive behavior", "= s3db.pr_contact query = (ctable.pe_id == pe_id) & \\ (ctable.contact_method", "body = \"%s\\n%s\" % (item, body) # Lookup series_id stable", "True, module_type = 3, )), #(\"vol\", Storage( # name_nice =", "10, )), (\"survey\", Storage( name_nice = T(\"Surveys\"), #description = \"Create,", "from Scenario templates) for allocation of appropriate Resources (Human, Assets", "# ----------------------------------------------------------------------------- # Projects # Use codes for projects (called", "else: # Create form: Default rss_import = None crud_form =", "\"comments\", ) from s3 import S3TextFilter, S3OptionsFilter, S3LocationFilter, S3DateFilter filter_widgets", "[(\"\", \"value\")], filterby = dict(field = \"contact_method\", options = \"TWITTER\"", "hide_lx=False, reverse_lx=True, show_address=True, show_postcode=True, ) table.organisation_id.widget = S3MultiSelectWidget(multiple=False) if r.get_vars.get(\"format\",", "\"organisation_type\", field = \"organisation_type_id\", label = T(\"Type\"), multiple = False,", "for Requester settings.req.requester_label = \"Site Contact\" # Filter Requester as", "table = current.s3db.hrm_job_title table.organisation_id.readable = table.organisation_id.writable = False table.type.readable =", "Chairperson\") if r.component_name == \"group_membership\": from s3layouts import S3AddResourceLink s3db.pr_group_membership.person_id.comment", "= True, module_type = None, # Not displayed )), (\"inv\",", "return output s3.postp = custom_postp return attr settings.customise_pr_person_controller = customise_pr_person_controller", "fields = [(\"\", \"location_id\")], ), # Partner Orgs S3SQLInlineComponent( \"organisation\",", "same org/site as Contacts they create field.default = None #", "= True, header = \"\", hidden = True, ), S3OptionsFilter(\"group_person.group_id\",", "import current from gluon.html import A, URL from gluon.storage import", "# Make Services Hierarchical settings.org.services_hierarchical = True # Set the", "name_exists.enabled: # Nothing to do :) return else: # Enable", "\"Membership Management System\", # restricted = True, # module_type =", "their parent #settings.gis.check_within_parent_boundaries = False # GeoNames username settings.gis.geonames_username =", "= T(\"Map\"), #description = \"Situation Awareness & Geospatial Analysis\", restricted", "register themselves? 
settings.security.self_registration = \"index\" # Do new users need", "org/group) to breakout from tabs attr[\"native\"] = True return attr", "\"Site Contact\" # Filter Requester as being from the Site", "----------------------------------------------------------------------------- # CMS # Uncomment to use Bookmarks in Newsfeed", "None # This item is handled separately for the menu", "T(\"Volunteers\"), # #description = \"Human Resources Management\", # restricted =", "s3.prep = custom_prep return attr settings.customise_hrm_job_title_controller = customise_hrm_job_title_controller # -----------------------------------------------------------------------------", "except: current.log.error(\"Org %s not found: cannot set rss_import correctly\" %", "\"\"\" Template settings for NYC Prepared \"\"\" # Pre-Populate settings.base.prepopulate", "S3OptionsFilter(\"group_person.group_id\", label = T(\"Network\"), #filter = True, #header = \"\",", "= current.s3db.pr_group_membership ptable = db.pr_person query = (mtable.group_id == group_id)", "\"address\", label = T(\"Address\"), multiple = False, # This is", "list_fields, ) elif r.component_name == \"group_membership\": s3db.pr_group_membership.group_head.label = T(\"Group Chairperson\")", "Tags in Newsfeed settings.cms.show_tags = True # Uncomment to show", "group_id = row.id except: # not available return current.messages[\"NONE\"] db", "= None else: # Component if r.component_id: # Update form", "# Read the full record row = db(rtable.id == req_id).select(rtable.type,", "T(\"Disaster Victim Registry\"), # #description = \"Allow affected individuals &", "= True # Do new users need to be approved", "# Disable channel (& associated parsers) s3db.msg_channel_disable(\"msg_rss_channel\", url_exists.channel_id) return elif", "= 5, )), #(\"cr\", Storage( # name_nice = T(\"Shelters\"), #", "has changed Name or this feed is associated with #", "'Mobile Phone' settings.ui.label_mobile_phone = \"Cell Phone\" # Enable this to", "numbers (defaults to ,) settings.L10n.decimal_separator = \".\" # Thousands separator", "S3SQLInlineComponent( \"contact\", name = \"email\", label = EMAIL, multiple =", "\"group_id\", ) s3db.configure(\"pr_group\", # Redirect to member list when a", "= current.db s3db = current.s3db rtable = s3db.req_req # Read", "name = \"email\", label = T(\"Email\"), multiple = False, fields", "list_fields.insert(1, \"group_membership.status_id\") return result s3.prep = custom_prep if current.auth.s3_logged_in(): #", "Custom postp standard_postp = s3.postp def custom_postp(r, output): # Call", "args=[\"[id]\", \"group_membership\"]), ) settings.customise_pr_group_resource = customise_pr_group_resource # ----------------------------------------------------------------------------- def pr_contact_postprocess(form):", "= True # Uncomment this to use Milestones in project/task.", "settings.customise_pr_group_resource = customise_pr_group_resource # ----------------------------------------------------------------------------- def pr_contact_postprocess(form): \"\"\" Import Organisation/Network", "= \"Media\" settings.ui.update_label = \"Edit\" # Uncomment to disable checking", "== \"aadata\": if not r.component: from s3 import S3TextFilter, S3OptionsFilter,", "row.pr_group try: group_id = row.id except: # not available return", "module_type = 10, )), # Vehicle depends on Assets #(\"vehicle\",", "False, fields = [(\"\", \"value\"), #(T(\"Don't Import Feed\"), \"poll\"), ],", "name_nice = T(\"Messaging\"), #description = \"Sends & Receives Alerts 
via", "----------------------------------------------------------------------------- def customise_org_facility_controller(**attr): s3db = current.s3db s3 = current.response.s3 #", "= \"NYC\" # Uncomment to Hide the language toolbar settings.L10n.display_toolbar", "after crud_form completes - creates a cms_post in the newswire", "== \"human_resource\": # Don't assume that user is from same", "from other modules. module_type = None, )), (\"supply\", Storage( name_nice", "= s3db.pr_image.image #from gluon.validators import IS_IMAGE #image_field.requires = IS_IMAGE() #image_field.widget", "# name_nice = T(\"Vehicles\"), # #description = \"Manage Vehicles\", #", "if r.method in (\"create\", \"update\"): field.label = \"\" # Gets", "use Bookmarks in Newsfeed settings.cms.bookmarks = True # Uncomment to", ": [], # \"pr_person_details.affiliations\" : [], # \"vol_volunteer.active\" : [],", "not Emails settings.ui.auth_user_represent = \"name\" # Record Approval settings.auth.record_approval =", "to change the label for 'Mobile Phone' settings.ui.label_mobile_phone = \"Cell", "if r.component_name == \"group_membership\": from s3layouts import S3AddResourceLink s3db.pr_group_membership.person_id.comment =", "or this feed is associated with # another Contact #", "\"end_date\", (T(\"Locations\"), \"location.location_id\"), ] s3db.configure(tablename, crud_form = crud_form, filter_widgets =", "or r.representation == \"aadata\": if not r.component: from s3 import", "= False # Uncomment this to request the Mobile Phone", "----------------------------------------------------------------------------- # Audit def audit_write(method, tablename, form, record, representation): if", "to those of parent Project settings.gis.countries = (\"US\",) settings.fin.currencies =", "access to this module access = None, # All Users", "ISO 31-0) # Decimal separator for numbers (defaults to ,)", "field.writable = False hr_fields.remove(\"organisation_id\") site_id = get_vars.get(\"(site)\", None) if site_id:", "Facilities).\", restricted = True, module_type = 10, )), (\"survey\", Storage(", "elif no_import: # Nothing to do :) return #else: #", "Uncomment to hide fields in S3AddPersonWidget settings.pr.request_dob = False settings.pr.request_gender", "# ), ] list_fields = [(current.messages.ORGANISATION, \"human_resource.organisation_id\"), \"first_name\", #\"middle_name\", \"last_name\",", "# Check if we already have a channel for this", "= S3SQLCustomForm(S3SQLInlineComponent( \"site_facility_type\", label = T(\"Facility Type\"), fields = [(\"\",", "None else: ctable = s3db.pr_contact query = (ctable.pe_id == pe_id)", "s3 import IS_LOCATION_SELECTOR2, S3LocationSelectorWidget2 table = s3db.org_facility field = table.location_id", "Organisation name in HR represents settings.hrm.show_organisation = True # Uncomment", "# Colour code by open/priority requests reqs = record.reqs if", "Uncomment to use person_id instead of created_by in Newsfeed settings.cms.person", "the deployment # Should users be allowed to register themselves?", "IS_LOCATION_SELECTOR2(levels = (\"L2\",)) field.requires = IS_EMPTY_OR( IS_LOCATION_SELECTOR2(levels = (\"L2\",)) )", "elif url_exists.enabled: # Nothing to do :) return else: #", "\"service.name\"), \"phone\", (T(\"Email\"), \"email.value\"), \"website\" #(T(\"Neighborhoods Served\"), \"location.name\"), ] s3db.configure(\"org_organisation\",", "if name_exists.url == rss_url: # No change to either Contact", "T(\"Admin\"), #description = \"Site Administration\", restricted = True, access =", 
"ptable.id, limitby=(0, 1)).first() if chair: # Only used in list", "want inconsistent across tabs # s3db.pr_group_membership.group_head.label = T(\"Chairperson\") return True", "inv & req tabs from Sites #settings.org.site_inv_req_tabs = True #", ") elif r.component_name == \"pr_group\": list_fields = [#(T(\"Network\"), \"group_team.org_group_id\"), \"name\",", "\"Recording and Assigning Assets\", restricted = True, module_type = 10,", "\\ (gtable.level == \"L2\") manhattan = db(query).select(gtable.id, limitby=(0, 1)).first() if", ")), (\"supply\", Storage( name_nice = T(\"Supply Chain Management\"), #description =", "= 10, )), (\"survey\", Storage( name_nice = T(\"Surveys\"), #description =", "), ] s3db = current.s3db s3db.configure(\"hrm_human_resource\", filter_widgets = filter_widgets, )", "db = current.db otable = s3db.org_organisation org = db(otable.id ==", "from s3 import S3Represent, S3TextFilter, S3OptionsFilter, S3SQLCustomForm, S3SQLInlineComponent s3db =", "list_fields = [(T(\"Network\"), \"group_team.org_group_id\"), \"name\", \"description\", \"meetings\", (T(\"Chairperson\"), \"chairperson\"), \"comments\",", "= \"staff\" settings.security.policy = 5 # Controller, Function & Table", "Nothing to do :) return # Check if we already", "output s3.postp = custom_postp return attr settings.customise_pr_person_controller = customise_pr_person_controller #", "\"group_membership\": s3db.pr_group_membership.group_head.label = T(\"Group Chairperson\") return result s3.prep = custom_prep", "as being from the Site settings.req.requester_from_site = True # Label", "= True, ), ] list_fields = [\"name\", (T(\"Type\"), \"organisation_organisation_type.organisation_type_id\"), (T(\"Services\"),", "Storage( # name_nice = T(\"Disaster Victim Registry\"), # #description =", "Nothing to do :) return #else: # # Create a", "), ] list_fields = [\"id\", \"name\", \"code\", \"organisation_id\", \"start_date\", \"end_date\",", "return result s3.prep = custom_prep return attr settings.customise_hrm_human_resource_controller = customise_hrm_human_resource_controller", "# Name field is unique so rename old one name_exists.update_record(name=\"%s", "#hidden = True, ), ] # Need to re-do list_fields", "(\"req\", Storage( name_nice = T(\"Requests\"), #description = \"Manage requests for", "item in items: item = \"%s %s %s\" % (item.quantity,", "module_type = 10 )), #(\"proc\", Storage( # name_nice = T(\"Procurement\"),", "r.component_name == \"group_membership\": from s3layouts import S3AddResourceLink s3db.pr_group_membership.person_id.comment = \\", "[(\"\", \"facility_type_id\")], multiple = False, required = True, ), \"name\",", "\"contact\", name = \"phone2\", label = T(\"Phone2\"), multiple = False,", "= False return result s3.prep = custom_prep return attr settings.customise_hrm_job_title_controller", "Contact & 1 for the URL # Disable the old", "Management\"), #description = \"Used within Inventory Management, Request Management and", "'Role' in DRRPP ], filterby = dict(field = \"role\", options", "Staff & Volunteers to be registered without an email address", "# Uncomment to chage the label for 'Staff' settings.hrm.staff_label =", "False, # fields = [(\"\", \"image\")], # filterby = dict(field", "pr_group resource (in group & org_group controllers) - runs after", "#description = \"Allows a Budget to be drawn up\", #", "#description = \"Rapid Assessments & Flexible Impact Assessments\", restricted =", "settings.L10n.decimal_separator = \".\" # Thousands separator for numbers (defaults to", "modern 
style #(\"budget\", Storage( # name_nice = T(\"Budgeting Module\"), #", "current.db(query).select(ctable.poll, limitby=(0, 1) ).first() if rss and not rss.poll: #", "= T(\"Name\"), _class = \"filter-search\", ), S3OptionsFilter(\"group_membership.group_id\", label = T(\"Network\"),", "[\"document_id\", \"name\", \"url\", \"comments\", ], filterby = dict(field = \"name\")", "request the Mobile Phone when a user registers settings.auth.registration_requests_mobile_phone =", "None crud_form = S3SQLCustomForm(S3SQLInlineComponent( \"site_facility_type\", label = T(\"Facility Type\"), fields", "== record.site_id) & \\ (ltable.facility_type_id == table.id) rows = db(query).select(table.name)", "limitby=(0, 1) ).first() try: pe_id = org.pe_id except: current.log.error(\"Org %s", "- creates a cms_post in the newswire - @ToDo: Send", "secure the deployment # Should users be allowed to register", "# Either Contact has changed Name or this feed is", "Allows relief agencies to coordinate their activities', restricted = True,", "= 9, # 8th item in the menu )), (\"pr\",", "= True, # module_type = 10 # )), # @ToDo:", "# Comment/uncomment modules here to disable/enable them settings.modules = OrderedDict([", "Tell the client to request per-feature markers s3db.configure(\"org_facility\", marker_fn=facility_marker_fn) #", "a group \"\"\" if hasattr(row, \"pr_group\"): row = row.pr_group try:", "None # No Menu )), (\"errors\", Storage( name_nice = T(\"Ticket", "Audit return True else: # Don't Audit non user-visible resources", "marker = \"%s_yellow\" % marker elif reqs == 1: #", "filters in Newsfeed when clicking on locations instead of opening", "current.s3task.async async(\"msg_poll\", args=[\"msg_rss_channel\", channel_id]) async(\"msg_parse\", args=[channel_id, \"parse_rss\"]) # ----------------------------------------------------------------------------- #", "= True, # module_type = None # This item is", "= [(\"\", \"facility_type_id\")], multiple = False, required = True, ),", "settings.customise_req_req_resource = customise_req_req_resource # ----------------------------------------------------------------------------- # Comment/uncomment modules here to", "Label for Inventory Requests settings.req.type_inv_label = \"Supplies\" # Uncomment to", "Types common to both Send and Receive settings.inv.shipment_types = {", "if name_exists.enabled: # Disable channel (& associated parsers) s3db.msg_channel_disable(\"msg_rss_channel\", name_exists.channel_id)", "\"asset\"), ] output[\"rheader\"] = s3db.org_rheader(r, tabs=tabs) return output s3.postp =", "= current.request.post_vars.get(\"rss_no_import\", None) if name_exists: if name_exists.url == rss_url: #", "= T(\"Inventory\"), #description = \"Receiving and Sending Items\", restricted =", "mtable.group_id.widget = S3MultiSelectWidget(multiple=False) mtable.status_id.widget = S3MultiSelectWidget(multiple=False, create=dict(c=\"org\", f=\"group_membership_status\", label=str(T(\"Add New", "s3_sql_custom_fields.insert(3, S3SQLInlineComponent( \"contact\", name = \"phone\", label = MOBILE, multiple", "\\ (mtable.person_id == ptable.id) chair = db(query).select(ptable.first_name, ptable.middle_name, ptable.last_name, ptable.id,", "displayed )), (\"inv\", Storage( name_nice = T(\"Inventory\"), #description = \"Receiving", "[], # \"pr_person_details.company\" : [], # \"pr_person_details.affiliations\" : [], #", "S3SQLCustomForm, S3SQLInlineComponent s3_sql_custom_fields = [\"first_name\", #\"middle_name\", \"last_name\", S3SQLInlineComponent( \"human_resource\", 
name", "customise_pr_person_controller(**attr): s3 = current.response.s3 # Custom prep standard_prep = s3.prep", "(ctable.deleted == False) rss = current.db(query).select(ctable.poll, limitby=(0, 1) ).first() if", "field = \"organisation_type_id\", label = T(\"Type\"), multiple = False, #widget", "True, ), ] list_fields = [\"id\", \"name\", \"code\", \"organisation_id\", \"start_date\",", "s3db.configure(r.tablename, crud_form = crud_form, list_fields = list_fields, ) elif r.component_name", "when a user registers settings.auth.registration_requests_mobile_phone = True # Uncomment this", "S3AddResourceLink(c=\"pr\", f=\"person\", title=T(\"Create Person\"), tooltip=current.messages.AUTOCOMPLETE_HELP) #else: # # RHeader wants", "is doing what & where\". Allows relief agencies to coordinate", "list => disabled for all (including Admin) #settings.org.dependent_fields = {", "r.method in (\"create\", \"update\"): script = \\ '''$('#req_req_site_id').change(function(){ var url=$('#person_add').attr('href')", "crud_form = crud_form, onvalidation = org_facility_onvalidation, ) return True s3.prep", "postprocess = pr_contact_postprocess, ) s3db.configure(\"org_group\", crud_form = crud_form, ) elif", "# Nothing to do :) return else: # Enable channel", "# #description = \"Support Requests\", # restricted = True, #", "of a new (verified) user, even if the user is", "1) ).first() try: pe_id = org.pe_id except: current.log.error(\"Org %s not", "Storage( # name_nice = T(\"Vehicles\"), # #description = \"Manage Vehicles\",", "\"contact\", name = \"facebook\", label = T(\"Facebook\"), multiple = False,", "S3MultiSelectWidget(multiple=False) mtable.status_id.widget = S3MultiSelectWidget(multiple=False, create=dict(c=\"org\", f=\"group_membership_status\", label=str(T(\"Add New Status\")), parent=\"group_membership\",", "Contacts they create field.default = None # Use a hierarchical", "), S3LocationFilter(\"location_id\", label = T(\"Location\"), levels = (\"L1\", \"L2\", \"L3\",", "None, # All Users (inc Anonymous) can see this module", "db(mtable.name == marker).select(mtable.image, mtable.height, mtable.width, cache=s3db.cache, limitby=(0, 1) ).first() except:", "r.component_id: # Update form db = current.db otable = s3db.org_organisation", "for views/layout.html) settings.base.theme = \"NYC\" settings.ui.formstyle_row = \"bootstrap\" settings.ui.formstyle =", "\\ (ctable.contact_method == \"RSS\") & \\ (ctable.deleted == False) rss", "filterby = dict(field = \"contact_method\", options = \"SMS\")), ) s3_sql_custom_fields.insert(3,", "S3TextFilter, S3OptionsFilter, S3LocationFilter, S3DateFilter filter_widgets = [ S3TextFilter([\"name\", \"code\", \"description\",", "T(\"Reject\"), #4: T(\"Surplus\") } # ----------------------------------------------------------------------------- # Organisations # #", "}, ), #\"budget\", #\"currency\", \"comments\", ) from s3 import S3TextFilter,", "active or not as appropriate # Name field is unique", "multiple = False, fields = hr_fields, ), #S3SQLInlineComponent( # \"image\",", "language toolbar settings.L10n.display_toolbar = False # Default timezone for users", "with components (such as org/group) to breakout from tabs attr[\"native\"]", "Enable certain fields just for specific Organisations # empty list", "crud_form completes - creates a cms_post in the newswire -", "True, module_type = 9, # 8th item in the menu", "#label = T(\"Service\"), # #hidden = True, # ), S3OptionsFilter(\"organisation_organisation_type.organisation_type_id\",", "table.name.label = 
T(\"Name\") table.description.label = T(\"Description\") table.meetings.readable = table.meetings.writable =", "\"%s_red\" % marker elif reqs == 2: # Medium marker", "label = T(\"Team\"), filter = True, header = \"\", hidden", "\"project_id\", \"rkey\": \"activity_type_id\", }, ), #\"budget\", #\"currency\", \"comments\", ) from", "\"pr_group\": list_fields = [#(T(\"Network\"), \"group_team.org_group_id\"), \"name\", \"description\", \"meetings\", (T(\"Chairperson\"), \"chairperson\"),", "marker = db(mtable.name == \"office\").select(mtable.image, mtable.height, mtable.width, cache=s3db.cache, limitby=(0, 1)", "value = rss_import, ), T(\"Don't Import Feed\")), name = \"rss\",", "\"contact_method\", options = \"RSS\" ) ), S3SQLInlineComponent( \"document\", name =", "False if tablename in (\"cms_post\", \"org_facility\", \"org_organisation\", \"req_req\", ): #", "8th item in the menu )), (\"pr\", Storage( name_nice =", "from s3 import S3TextFilter, S3OptionsFilter, S3LocationFilter, S3DateFilter filter_widgets = [", "before prep \"\"\" s3db = current.s3db table = s3db.pr_group field", "1) ).first() # Build Title & Body from the Request", "All modules below here should be possible to disable safely", "ptable.middle_name, ptable.last_name, ptable.id, limitby=(0, 1)).first() if chair: # Only used", "\"FACEBOOK\" ) ), \"meetings\", \"comments\", postprocess = pr_contact_postprocess, ) s3db.configure(\"org_group\",", "3, # Filter Activity Type by Project filter = {\"linktable\":", "#description = \"Used within Inventory Management, Request Management and Asset", "# Uncomment to show Links in Newsfeed settings.cms.show_links = True", "ptable = s3db.cms_post _id = ptable.insert(series_id=series_id, title=title, body=body, location_id=location_id, person_id=row.requester_id,", "True, #header = \"\", hidden = True, ), S3LocationFilter(\"location_id\", label", "# Persons def customise_pr_person_controller(**attr): s3 = current.response.s3 # Custom prep", "], ), S3SQLInlineComponent( \"address\", label = T(\"Address\"), multiple = False,", "== 2: # Medium marker = \"%s_yellow\" % marker elif", "module #(\"building\", Storage( # name_nice = T(\"Building Assessments\"), # #description", "def customise_pr_group_controller(**attr): s3 = current.response.s3 # Custom prep standard_prep =", "page?\" organisation_id = get_vars.get(\"(organisation)\", None) if organisation_id: field = s3db.hrm_human_resource.organisation_id", "assets, staff or other resources. Matches against Inventories where supplies", "# Low marker = \"%s_green\" % marker mtable = db.gis_marker", "channel (& associated parsers) s3db.msg_channel_enable(\"msg_rss_channel\", url_exists.channel_id) return else: # Update", "is handled separately for the menu )), (\"appadmin\", Storage( name_nice", "main purpose is to be accessed from other modules. 
# -----------------------------------------------------------------------------
def customise_org_facility_controller(**attr):

    s3db = current.s3db
    s3 = current.response.s3

    # Tell the client to request per-feature markers
    s3db.configure("org_facility", marker_fn=facility_marker_fn)

    # Custom PreP
    standard_prep = s3.prep
    def custom_prep(r):
        # Call standard prep
        if callable(standard_prep):
            result = standard_prep(r)
        else:
            result = True

        if r.method not in ("read", "update"):
            types = r.get_vars.get("site_facility_type.facility_type_id__belongs", None)
            if not types:
                # Hide Private Residences
                from s3 import FS
                s3.filter = FS("site_facility_type.facility_type_id$name") != "Private Residence"

        if r.interactive:
            tablename = "org_facility"
            table = s3db.org_facility

            if r.method in ("create", "update"):
                field = table.location_id
                field.label = "" # Gets replaced by widget
                from gluon import IS_EMPTY_OR
                from s3 import IS_LOCATION_SELECTOR2, S3LocationSelectorWidget2
                #field.requires = IS_LOCATION_SELECTOR2(levels = ("L2",))
                field.requires = IS_EMPTY_OR(
                                    IS_LOCATION_SELECTOR2(levels = ("L2",))
                                    )
                field.widget = S3LocationSelectorWidget2(levels = ("L2",),
                                                         show_address = True,
                                                         show_postcode = True,
                                                         )

            if r.representation == "popup":
                # Coming from req/create form
                # Hide most Fields
                from s3 import S3SQLCustomForm, S3SQLInlineComponent
                # We default this onvalidation
                table.name.notnull = False
                table.name.requires = None
                crud_form = S3SQLCustomForm(S3SQLInlineComponent(
                                                "site_facility_type",
                                                label = T("Facility Type"),
                                                fields = [("", "facility_type_id")],
                                                multiple = False,
                                                required = True,
                                                ),
                                            "name",
                                            "location_id",
                                            )
                s3db.configure(tablename,
                               crud_form = crud_form,
                               onvalidation = org_facility_onvalidation,
                               )
        return result
    s3.prep = custom_prep

    return attr

settings.customise_org_facility_controller = customise_org_facility_controller
\"rkey\": \"activity_type_id\",", "Uncomment to use Bookmarks in Newsfeed settings.cms.bookmarks = True #", "in Newsfeed be open by default settings.cms.filter_open = True #", "#image_field.widget = None from s3 import S3SQLCustomForm, S3SQLInlineComponent s3_sql_custom_fields =", "settings.get_ui_label_mobile_phone() EMAIL = T(\"Email\") list_fields += [(MOBILE, \"phone.value\"), (EMAIL, \"email.value\"),", "# ----------------------------------------------------------------------------- # Organisations # # Enable the use of", "if not r.component and r.method in (\"create\", \"update\"): script =", "T(\"Service\"), # #hidden = True, # ), S3OptionsFilter(\"organisation_organisation_type.organisation_type_id\", label =", "label = T(\"Facebook\"), multiple = False, fields = [(\"\", \"value\")],", "adjust filters in Newsfeed when clicking on locations instead of", "T(\"Start Date\"), hide_time = True, #hidden = True, ), S3DateFilter(\"end_date\",", "\"project_project\" table.code.label = T(\"Project blurb (max. 100 characters)\") table.code.max_length =", "# Hide Private Residences from s3 import FS s3.filter =", "2, # )), (\"cms\", Storage( name_nice = T(\"Content Management\"), #description", "# Uncomment to allow Staff & Volunteers to be registered", "skill_represent(skill.skill_id)) body = \"%s\\n%s\" % (item, body) # Lookup series_id", "None else: # Create form: Default rss_import = None mtable", "else: result = True if r.interactive or r.representation == \"aadata\":", "if tablename in (\"cms_post\", \"org_facility\", \"org_organisation\", \"req_req\", ): # Perform", "the default menu & access the controller module_type = None", "S3SQLInlineComponent s3db = current.s3db s3db.org_group_team.org_group_id.represent = S3Represent(lookup=\"org_group\", show_link=True) crud_form =", "= True, ), ] s3db = current.s3db s3db.configure(\"hrm_human_resource\", filter_widgets =", "& Table ACLs # Enable this to have Open links", "form.record: # Update form old_rss = form.record.sub_rsscontact import json data", "internal support requests #(\"support\", Storage( # name_nice = T(\"Support\"), #", "\"rss\", label = T(\"RSS\"), multiple = False, fields = [(\"\",", "this feed is associated with # another Contact # -", "False settings.pr.request_gender = False # Doesn't yet work (form fails", "restricted = True, module_type = 10 )), #(\"proc\", Storage( #", "This item is handled separately for the menu )), #", "S3LocationSelectorWidget2 table = s3db.org_facility field = table.location_id if r.method in", "rstable.quantity) skill_represent = s3db.hrm_multi_skill_represent for skill in skills: item =", ") if r.interactive: from gluon.html import DIV, INPUT from s3", "result = True if r.interactive or r.representation == \"aadata\": table", "channel (& associated parsers) s3db.msg_channel_enable(\"msg_rss_channel\", name_exists.channel_id) return else: # Check", "Uncomment to disable the use of HR Certificates settings.hrm.use_certificates =", "if date_required: date = rtable.date_required.represent(date_required) title = \"%(priority)s by %(date)s\"", "True # Extra fields to search in Autocompletes & display", "org_group_team = \"group_id\", ) s3db.configure(\"pr_group\", # Redirect to member list", "controllers) - runs after controller customisation - but runs before", "= T(\"Training\"), hidden = True, ), S3OptionsFilter(\"group_membership.group_id\", label = T(\"Team\"),", "name url_exists.update_record(name=name) if no_import: if url_exists.enabled: # Disable channel (&", "by open/priority requests reqs = 
# -----------------------------------------------------------------------------
def customise_org_organisation_resource(r, tablename):

    from gluon.html import DIV, INPUT
    from s3 import S3MultiSelectWidget, S3SQLCustomForm, S3SQLInlineLink, \
                   S3SQLInlineComponent, S3SQLInlineComponentMultiSelectWidget

    s3db = current.s3db

    # Should the RSS feed be imported or not?
    if r.tablename == "org_organisation":
        if r.id:
            # Update form
            ctable = s3db.pr_contact
            query = (ctable.pe_id == r.record.pe_id) & \
                    (ctable.contact_method == "RSS") & \
                    (ctable.deleted == False)
            rss = current.db(query).select(ctable.poll,
                                           limitby=(0, 1)
                                           ).first()
            if rss and not rss.poll:
                # Remember that we don't wish to import
                rss_import = "on"
            else:
                # Default
                rss_import = None
        else:
            # Create form: Default
            rss_import = None
    else:
        # Organisation as a component (e.g. of a Network)
        if r.component_id:
            # Update form
            db = current.db
            otable = s3db.org_organisation
            org = db(otable.id == r.component_id).select(otable.pe_id,
                                                         limitby=(0, 1)
                                                         ).first()
            try:
                pe_id = org.pe_id
            except:
                current.log.error("Org %s not found: cannot set rss_import correctly" % r.component_id)
                # Default
                rss_import = None
            else:
                ctable = s3db.pr_contact
                query = (ctable.pe_id == pe_id) & \
                        (ctable.contact_method == "RSS") & \
                        (ctable.deleted == False)
                rss = db(query).select(ctable.poll,
                                       limitby=(0, 1)
                                       ).first()
                if rss and not rss.poll:
                    # Remember that we don't wish to import
                    rss_import = "on"
                else:
                    # Default
                    rss_import = None
        else:
            # Create form: Default
            rss_import = None

    mtable = s3db.org_group_membership
    mtable.group_id.widget = S3MultiSelectWidget(multiple=False)
    mtable.status_id.widget = S3MultiSelectWidget(multiple=False,
                                                  create=dict(c="org",
                                                              f="group_membership_status",
                                                              label=str(T("Add New Status")),
                                                              parent="group_membership",
                                                              ))

    crud_form = S3SQLCustomForm(
        "name",
        "acronym",
        S3SQLInlineLink(
            "organisation_type",
            field = "organisation_type_id",
            label = T("Type"),
            multiple = False,
            #widget = "hierarchy",
            ),
        S3SQLInlineComponentMultiSelectWidget(
        # activate hierarchical org_service:
        #S3SQLInlineLink(
            "service",
            label = T("Services"),
            field = "service_id",
            # activate hierarchical org_service:
            #leafonly = False,
            #widget = "hierarchy",
            ),
        S3SQLInlineComponent(
            "group_membership",
            label = T("Network"),
            fields = [("", "group_id"),
                      ("", "status_id"),
                      ],
            ),
        S3SQLInlineComponent(
            "address",
            label = T("Address"),
            multiple = False,
            # This is just Text - put into the Comments box for now
            # Ultimately should go into location_id$addr_street
            fields = [("", "comments")],
            ),
        S3SQLInlineComponent(
            "location",
            label = T("Neighborhoods Served"),
            fields = [("", "location_id")],
            filterby = dict(field = "level",
                            options = "L4"
                            ),
            # @ToDo: GroupedCheckbox Widget or Hierarchical MultiSelectWidget
            #cols = 5,
            ),
        S3SQLInlineComponent(
            "contact",
            name = "phone",
            label = T("Phone"),
            multiple = False,
            fields = [("", "value")],
            filterby = dict(field = "contact_method",
                            options = "WORK_PHONE"
                            ),
            ),
        S3SQLInlineComponent(
            "contact",
            name = "email",
            label = T("Email"),
            multiple = False,
            fields = [("", "value")],
            filterby = dict(field = "contact_method",
                            options = "EMAIL"
                            ),
            ),
        "website",
        S3SQLInlineComponent(
            "contact",
            comment = DIV(INPUT(_type="checkbox",
                                _name="rss_no_import",
                                value = rss_import,
                                ),
                          T("Don't Import Feed")),
            name = "rss",
            label = T("RSS"),
            multiple = False,
            fields = [("", "value")],
            filterby = dict(field = "contact_method",
                            options = "RSS"
                            ),
            ),
        S3SQLInlineComponent(
            "document",
            name = "iCal",
            label = "iCAL",
            multiple = False,
            fields = [("", "url")],
            filterby = dict(field = "name",
                            options="iCal"
                            ),
            ),
        S3SQLInlineComponent(
            "document",
            name = "data",
            label = T("Data"),
            multiple = False,
            fields = [("", "url")],
            filterby = dict(field = "name",
                            options="Data"
                            ),
            ),
        S3SQLInlineComponent(
            "contact",
            name = "twitter",
            label = T("Twitter"),
            multiple = False,
            fields = [("", "value")],
            filterby = dict(field = "contact_method",
                            options = "TWITTER"
                            ),
            ),
        S3SQLInlineComponent(
            "contact",
            name = "facebook",
            label = T("Facebook"),
            multiple = False,
            fields = [("", "value")],
            filterby = dict(field = "contact_method",
                            options = "FACEBOOK"
                            ),
            ),
        "comments",
        postprocess = pr_contact_postprocess,
        )

    from s3 import S3LocationFilter, S3OptionsFilter, S3TextFilter
    # activate hierarchical org_service:
    #from s3 import S3HierarchyFilter
    filter_widgets = [
        S3TextFilter(["name", "acronym"],
                     label = T("Name"),
                     _class = "filter-search",
                     ),
        S3OptionsFilter("group_membership.group_id",
                        label = T("Network"),
                        #hidden = True,
                        ),
        S3LocationFilter("organisation_location.location_id",
                         label = T("Neighborhood"),
                         levels = ("L4",),
                         #hidden = True,
                         ),
        S3OptionsFilter("service_organisation.service_id",
                        #label = T("Service"),
                        #hidden = True,
                        ),
        # activate hierarchical org_service:
        #S3HierarchyFilter("service_organisation.service_id",
        #                  #label = T("Service"),
        #                  #hidden = True,
        #                  ),
        S3OptionsFilter("organisation_organisation_type.organisation_type_id",
                        label = T("Type"),
                        #hidden = True,
                        ),
        ]

    list_fields = ["name",
                   (T("Type"), "organisation_organisation_type.organisation_type_id"),
                   (T("Services"), "service.name"),
                   "phone",
                   (T("Email"), "email.value"),
                   "website",
                   #(T("Neighborhoods Served"), "location.name"),
                   ]

    s3db.configure(tablename,
                   crud_form = crud_form,
                   filter_widgets = filter_widgets,
                   list_fields = list_fields,
                   )

settings.customise_org_organisation_resource = customise_org_organisation_resource
# -----------------------------------------------------------------------------
def customise_org_organisation_controller(**attr):

    s3db = current.s3db
    s3 = current.response.s3

    # Custom prep
    standard_prep = s3.prep
    def custom_prep(r):
        # Call standard prep
        if callable(standard_prep):
            result = standard_prep(r)
        else:
            result = True

        if r.interactive:
            if r.component_name == "facility":
                if r.method in (None, "create", "update"):
                    from s3 import IS_LOCATION_SELECTOR2, S3LocationSelectorWidget2
                    table = s3db.org_facility
                    field = table.location_id
                    field.label = "" # Gets replaced by widget
                    levels = ("L2",)
                    field.requires = IS_LOCATION_SELECTOR2(levels=levels)
                    field.widget = S3LocationSelectorWidget2(levels=levels,
                                                             hide_lx=False,
                                                             reverse_lx=True,
                                                             show_address=True,
                                                             show_postcode=True,
                                                             )
        return result
    s3.prep = custom_prep

    # Custom postp
    standard_postp = s3.postp
    def custom_postp(r, output):
        # Call standard postp
        if callable(standard_postp):
            output = standard_postp(r, output)

        if r.interactive and isinstance(output, dict):
            if "rheader" in output:
                # Custom Tabs
                tabs = [(T("Basic Details"), None),
                        (T("Contacts"), "human_resource"),
                        (T("Facilities"), "facility"),
                        (T("Projects"), "project"),
                        (T("Assets"), "asset"),
                        ]
                output["rheader"] = s3db.org_rheader(r, tabs=tabs)
        return output
    s3.postp = custom_postp

    return attr

settings.customise_org_organisation_controller = customise_org_organisation_controller
# -----------------------------------------------------------------------------
def customise_org_group_controller(**attr):

    s3db = current.s3db
    s3 = current.response.s3

    # Custom prep
    standard_prep = s3.prep
    def custom_prep(r):
        # Call standard prep
        if callable(standard_prep):
            result = standard_prep(r)
        else:
            result = True

        if not r.component:
            table = s3db.org_group
            list_fields = ["name",
                           "mission",
                           "website",
                           "meetings",
                           ]
            s3db.configure("org_group",
                           list_fields = list_fields,
                           )
            if r.interactive and r.method != "read":
                from gluon.html import DIV, INPUT
                from s3 import IS_LOCATION_SELECTOR2, S3LocationSelectorWidget2, \
                               S3SQLCustomForm, S3SQLInlineComponent

                field = table.location_id
                field.label = "" # Gets replaced by widget
                # Default to Manhattan
                db = current.db
                gtable = db.gis_location
                query = (gtable.name == "New York") & \
                        (gtable.level == "L2")
                manhattan = db(query).select(gtable.id,
                                             limitby=(0, 1)).first()
                if manhattan:
                    field.default = manhattan.id
                levels = ("L2",)
                field.requires = IS_LOCATION_SELECTOR2(levels=levels)
                field.widget = S3LocationSelectorWidget2(levels=levels,
                                                         hide_lx=False,
                                                         reverse_lx=True,
                                                         show_address=True,
                                                         show_postcode=True,
                                                         )

                # Should the RSS feed be imported or not?
                if r.id:
                    # Update form
                    ctable = s3db.pr_contact
                    query = (ctable.pe_id == r.record.pe_id) & \
                            (ctable.contact_method == "RSS") & \
                            (ctable.deleted == False)
                    rss = current.db(query).select(ctable.poll,
                                                   limitby=(0, 1)
                                                   ).first()
                    if rss and not rss.poll:
                        # Remember that we don't wish to import
                        rss_import = "on"
                    else:
                        # Default
                        rss_import = None
                else:
                    # Create form: Default
                    rss_import = None

                crud_form = S3SQLCustomForm(
                    "name",
                    "location_id",
                    "mission",
                    S3SQLInlineComponent(
                        "contact",
                        name = "phone",
                        label = T("Phone"),
                        multiple = False,
                        fields = [("", "value")],
                        filterby = dict(field = "contact_method",
                                        options = "WORK_PHONE"
                                        ),
                        ),
                    S3SQLInlineComponent(
                        "contact",
                        name = "email",
                        label = T("Email"),
                        multiple = False,
                        fields = [("", "value")],
                        filterby = dict(field = "contact_method",
                                        options = "EMAIL"
                                        ),
                        ),
                    "website",
                    S3SQLInlineComponent(
                        "contact",
                        comment = DIV(INPUT(_type="checkbox",
                                            _name="rss_no_import",
                                            value = rss_import,
                                            ),
                                      T("Don't Import Feed")),
                        name = "rss",
                        label = T("RSS"),
                        multiple = False,
                        fields = [("", "value")],
                        filterby = dict(field = "contact_method",
                                        options = "RSS"
                                        ),
                        ),
                    S3SQLInlineComponent(
                        "contact",
                        name = "twitter",
                        label = T("Twitter"),
                        multiple = False,
                        fields = [("", "value")],
                        filterby = dict(field = "contact_method",
                                        options = "TWITTER"
                                        ),
                        ),
                    S3SQLInlineComponent(
                        "contact",
                        name = "facebook",
                        label = T("Facebook"),
                        multiple = False,
                        fields = [("", "value")],
                        filterby = dict(field = "contact_method",
                                        options = "FACEBOOK"
                                        ),
                        ),
                    "meetings",
                    "comments",
                    postprocess = pr_contact_postprocess,
                    )
                s3db.configure("org_group",
                               crud_form = crud_form,
                               )

        elif r.component_name == "organisation":
            # Add Network Status to List Fields
            list_fields = s3db.get_config("org_organisation", "list_fields")
            list_fields.insert(1, (T("Status"), "group_membership.status_id"))

        elif r.component_name == "pr_group":
            list_fields = [#(T("Network"), "group_team.org_group_id"),
                           "name",
                           "description",
                           "meetings",
                           (T("Chairperson"), "chairperson"),
                           "comments",
                           ]
            s3db.configure("pr_group",
                           list_fields = list_fields,
                           )
        return result
    s3.prep = custom_prep

    if current.auth.s3_logged_in():
        # Allow components with components (such as org/group) to breakout from tabs
        attr["native"] = True

    return attr

settings.customise_org_group_controller = customise_org_group_controller
# -----------------------------------------------------------------------------
# Persons
# Uncomment to hide fields in S3AddPersonWidget
settings.pr.request_dob = False
settings.pr.request_gender = False
# Doesn't yet work (form fails to submit)
#settings.pr.select_existing = False

# -----------------------------------------------------------------------------
def chairperson(row):
    """
        Virtual Field to show the chairperson of a group
    """

    if hasattr(row, "pr_group"):
        row = row.pr_group
    try:
        group_id = row.id
    except:
        # not present
        return current.messages["NONE"]

    db = current.db
    mtable = db.pr_group_membership
    ptable = db.pr_person
    query = (mtable.group_id == group_id) & \
            (mtable.group_head == True) & \
            (mtable.person_id == ptable.id)
    chair = db(query).select(ptable.first_name,
                             ptable.middle_name,
                             ptable.last_name,
                             ptable.id,
                             limitby=(0, 1)).first()
    if chair:
        # Only used in list view so HTML is OK
        return A(s3_fullname(chair),
                 _href=URL(c="hrm", f="person", args=chair.id))
    else:
        return current.messages["NONE"]

# -----------------------------------------------------------------------------
def customise_pr_group_controller(**attr):

    s3 = current.response.s3

    # Custom prep
    standard_prep = s3.prep
    def custom_prep(r):
        # Call standard prep
        if callable(standard_prep):
            result = standard_prep(r)
            if not result:
                return False

        from s3 import S3Represent, S3TextFilter, S3OptionsFilter, \
                       S3SQLCustomForm, S3SQLInlineComponent

        s3db = current.s3db
        s3db.org_group_team.org_group_id.represent = S3Represent(lookup="org_group",
                                                                 show_link=True)
        crud_form = S3SQLCustomForm("name",
                                    "description",
                                    S3SQLInlineComponent("group_team",
                                                         label = T("Network"),
                                                         fields = [("", "org_group_id")],
                                                         # @ToDo: Make this optional?
                                                         multiple = False,
                                                         ),
                                    "meetings",
                                    "comments",
                                    )
        filter_widgets = [
            S3TextFilter(["name",
                          "description",
                          "comments",
                          "group_team.org_group_id$name",
                          ],
                         label = T("Search"),
                         comment = T("You can search by group name, description or comments and by network name. You may use % as wildcard. Press 'Search' without input to list all."),
                         #_class = "filter-search",
                         ),
            S3OptionsFilter("group_team.org_group_id",
                            label = T("Network"),
                            #hidden = True,
                            ),
            ]
        # Need to re-do list_fields as get over_written by hrm_group_controller()
        list_fields = [#(T("Network"), "group_team.org_group_id"),
                       "name",
                       "description",
                       "meetings",
                       (T("Chairperson"), "chairperson"),
                       "comments",
                       ]
        s3db.configure("pr_group",
                       crud_form = crud_form,
                       filter_widgets = filter_widgets,
                       list_fields = list_fields,
                       )

        s3db.pr_group_membership.group_head.label = T("Group Chairperson")
        if r.component_name == "group_membership":
            from s3layouts import S3AddResourceLink
            s3db.pr_group_membership.person_id.comment = \
                S3AddResourceLink(c="pr", f="person",
                                  title=T("Create Person"),
                                  tooltip=current.messages.AUTOCOMPLETE_HELP)

        return True
    s3.prep = custom_prep

    return attr

settings.customise_pr_group_controller = customise_pr_group_controller
# -----------------------------------------------------------------------------
def customise_pr_group_resource(r, tablename):
    """
        Customise pr_group resource (in group & person controllers)
        - runs after controller customisation
        - but runs before prep
    """

    s3db = current.s3db

    table = s3db.pr_group
    field = table.group_type
    field.default = 3 # Relief Team, to show up in hrm/group
    field.readable = field.writable = False
    table.name.label = T("Name")
    table.description.label = T("Description")
    table.meetings.readable = table.meetings.writable = True

    # Increase size of widget
    from s3 import s3_comments_widget
    table.description.widget = s3_comments_widget

    from gluon import Field
    table.chairperson = Field.Method("chairperson", chairperson)

    # Networks as a component of Groups
    s3db.add_components("pr_group",
                        org_group_team = "group_id",
                        )

    s3db.configure("pr_group",
                   # Redirect to member list when a new group has been created
                   create_next = URL(c="pr", f="group",
                                     args=["[id]", "group_membership"]),
                   )

settings.customise_pr_group_resource = customise_pr_group_resource
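# A minimal, framework-free sketch of what Field.Method provides above: a
# lazy, per-row computed column like chairperson(). VirtualRow and full_name
# are illustrative names, not part of this template:
class VirtualRow(object):
    def __init__(self, **fields):
        self.__dict__.update(fields)

def full_name(row):
    """Compute a display name from stored name parts, like chairperson()."""
    parts = (row.first_name, row.middle_name, row.last_name)
    return " ".join(p for p in parts if p)

_row = VirtualRow(first_name="Jane", middle_name=None, last_name="Doe")
assert full_name(_row) == "Jane Doe"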
\"org_organisation\": if r.id: # Update", "Uncomment this to use settings suitable for detailed Task management", "Uncomment to use org_group_id in Newsfeed settings.cms.organisation_group = \"post_organisation_group.group_id\" #", "current.s3db table = s3db.pr_group field = table.group_type field.default = 3", "Uncomment to use organisation_id instead of created_by in Newsfeed settings.cms.organisation", "chairperson(row): \"\"\" Virtual Field to show the chairperson of a", "\"location.location_id\"), ] s3db.configure(tablename, crud_form = crud_form, filter_widgets = filter_widgets, list_fields", "#settings.inv.direct_stock_edits = True # Uncomment to call Stock Adjustments, 'Stock", "Prepared\") # Theme (folder to use for views/layout.html) settings.base.theme =", "False, # This is just Text - put into the", "Storage( name_nice = T(\"Home\"), restricted = False, # Use ACLs", "True # Enable certain fields just for specific Organisations #", "from gluon.contrib.simplejson.ordered_dict import OrderedDict from gluon import current from gluon.html", "% marker elif reqs == 2: # Medium marker =", "# name_nice = T(\"Procurement\"), # #description = \"Ordering & Purchasing", "+= [(MOBILE, \"phone.value\"), (EMAIL, \"email.value\"), ] s3_sql_custom_fields.insert(3, S3SQLInlineComponent( \"contact\", name", "Name field is unique so rename old one name_exists.update_record(name=\"%s (Old)\"", "\"location_id\", filterby = dict(field = \"level\", options = \"L4\" ),", "# ----------------------------------------------------------------------------- # Human Resource Management # Uncomment to chage", "= db.org_facility_type ltable = db.org_site_facility_type query = (ltable.site_id == record.site_id)", "types = [row.name for row in rows] # Use Marker", "crud_form, filter_widgets = filter_widgets, list_fields = list_fields, ) s3db.pr_group_membership.group_head.label =", "\"contact\", name = \"email\", label = T(\"Email\"), multiple = False,", "db.org_site_facility_type query = (ltable.site_id == record.site_id) & \\ (ltable.facility_type_id ==", "r.interactive: if r.component_name == \"facility\": if r.method in (None, \"create\",", "to both Send and Receive settings.inv.shipment_types = { 1: T(\"Other", "table.enabled, limitby = (0, 1) ).first() if url_exists: # We", "Need to re-do list_fields as get over_written by hrm_group_controller() list_fields", "# Uncomment to Hide the language toolbar settings.L10n.display_toolbar = False", "not r.component: table = s3db.org_group list_fields = [\"name\", \"mission\", \"website\",", "restricted = True, module_type = 5, )), (\"event\", Storage( name_nice", "s3db.pr_group field = table.group_type field.default = 3 # Relief Team,", "Uncomment to show post Titles in Newsfeed settings.cms.show_titles = True", "'Search' without input to list all.\"), #_class = \"filter-search\", ),", "'blurb' in NYC) settings.project.codes = True # Uncomment this to", "by widget #field.requires = IS_LOCATION_SELECTOR2(levels = (\"L2\",)) field.requires = IS_EMPTY_OR(", "to call Stock Adjustments, 'Stock Counts' settings.inv.stock_count = True #", "per-feature markers s3db.configure(\"org_facility\", marker_fn=facility_marker_fn) # Custom PreP standard_prep = s3.prep", "by network name. You may use % as wildcard. 
Press", "settings.auth.registration_requests_mobile_phone = True # Uncomment this to request the Organisation", "----------------------------------------------------------------------------- def org_facility_onvalidation(form): \"\"\" Default the name to the Street", "import A, URL from gluon.storage import Storage from s3 import", "Add RSS Channel _id = table.insert(name=name, enabled=True, url=rss_url) record =", "activate hierarchical org_service: #leafonly = False, #widget = \"hierarchy\", ),", ")), #(\"cr\", Storage( # name_nice = T(\"Shelters\"), # #description =", "use org_group_id in Newsfeed settings.cms.organisation_group = \"post_organisation_group.group_id\" # Uncomment to", "\"contact\", name = \"twitter\", label = T(\"Twitter\"), multiple = False,", "& \\ (ltable.facility_type_id == table.id) rows = db(query).select(table.name) types =", "\"FACEBOOK\" ) ), \"comments\", postprocess = pr_contact_postprocess, ) from s3", "= custom_prep # Custom postp standard_postp = s3.postp def custom_postp(r,", "search in Autocompletes & display in Representations settings.org.site_autocomplete_fields = (\"organisation_id$name\",", "= True, ), S3OptionsFilter(\"training.course_id\", label = T(\"Training\"), hidden = True,", "ritable.quantity) item_represent = s3db.supply_item_represent pack_represent = s3db.supply_item_pack_represent for item in", "# Uncomment to use person_id instead of created_by in Newsfeed", "settings.has_module(\"req\"): # Colour code by open/priority requests reqs = record.reqs", "view so HTML is OK return A(s3_fullname(chair), _href=URL(c=\"hrm\", f=\"person\", args=chair.id))", "channel_id]) async(\"msg_parse\", args=[channel_id, \"parse_rss\"]) # ----------------------------------------------------------------------------- # Human Resource Management", "Organizations\"), fields = [\"organisation_id\", \"comments\", # NB This is labelled", "date_required = row.date_required if date_required: date = rtable.date_required.represent(date_required) title =", "after controller customisation - but runs before prep \"\"\" s3db", "rss_url: if form.record: # Update form old_rss = form.record.sub_rsscontact import", "\"organisation_organisation_type.organisation_type_id\"), (T(\"Services\"), \"service.name\"), \"phone\", (T(\"Email\"), \"email.value\"), \"website\" #(T(\"Neighborhoods Served\"), \"location.name\"),", "& person controllers) - runs after controller customisation - but", "for detailed Task management settings.project.mode_task = False # Uncomment this", "9, # 8th item in the menu )), (\"pr\", Storage(", "= False, fields = hr_fields, ), #S3SQLInlineComponent( # \"image\", #", "= standard_prep(r) else: result = True if not r.component: table", "HR Education settings.hrm.use_education = False # Uncomment to disable the", "module in the default menu (access to controller is possible", "#description = \"Membership Management System\", # restricted = True, #", "breakout from tabs attr[\"native\"] = True return attr settings.customise_org_group_controller =", "S3OptionsFilter, S3TextFilter # activate hierarchical org_service: #from s3 import S3LocationFilter,", "_href=URL(c=\"hrm\", f=\"person\", args=chair.id)) else: return current.messages[\"NONE\"] # ----------------------------------------------------------------------------- def customise_pr_group_controller(**attr):", "Uncomment if you need a simpler (but less accountable) process", "to not track pack values settings.inv.track_pack_values = False settings.inv.send_show_org =", "elif reqs == 2: # Medium marker = 
\"%s_yellow\" %", "= db(mtable.name == marker).select(mtable.image, mtable.height, mtable.width, cache=s3db.cache, limitby=(0, 1) ).first()", "form # Hide most Fields from s3 import S3SQLCustomForm, S3SQLInlineComponent", "# Prepop hasn't been run series_id = None # Location", "settings.auth.registration_requests_organisation = True # Uncomment this to request the Site", "False #settings.req.use_req_number = False # Label for Requester settings.req.requester_label =", "custom_postp return attr settings.customise_org_organisation_controller = customise_org_organisation_controller # ----------------------------------------------------------------------------- def customise_org_group_controller(**attr):", "r.id: # Update form ctable = s3db.pr_contact query = (ctable.pe_id", "S3SQLInlineComponent( \"contact\", name = \"email\", label = T(\"Email\"), multiple =", "\"value\")], filterby = dict(field = \"contact_method\", options = \"RSS\" )", "settings.cms.filter_open = True # Uncomment to adjust filters in Newsfeed", "Building\" in types: marker = \"residence\" #elif \"Shelter\" in types:", "Rather it's main purpose is to be accessed from other", ")), #(\"member\", Storage( # name_nice = T(\"Members\"), # #description =", "\"service\", label = T(\"Services\"), field = \"service_id\", # activate hierarchical", "= list_fields, ) elif r.component_name == \"group_membership\": s3db.pr_group_membership.group_head.label = T(\"Group", "import S3SQLCustomForm, S3SQLInlineComponent # We default this onvalidation table.name.notnull =", "Storage( # name_nice = T(\"Volunteers\"), # #description = \"Human Resources", "= 10 # )), (\"asset\", Storage( name_nice = T(\"Assets\"), #description", "rows = db(query).select(table.name) types = [row.name for row in rows]", "= \"food\" elif \"Relief Site\" in types: marker = \"asset\"", "False, required = True, ), \"name\", \"location_id\", ) s3db.configure(tablename, crud_form", "return False if tablename in (\"cms_post\", \"org_facility\", \"org_organisation\", \"req_req\", ):", "limitby=(0, 1) ).first() except: marker = db(mtable.name == \"office\").select(mtable.image, mtable.height,", "# Enable channel (& associated parsers) s3db.msg_channel_enable(\"msg_rss_channel\", url_exists.channel_id) return else:", "Facilities in Inventory Management settings.inv.facility_label = \"Facility\" # Uncomment if", "= db(rtable.id == req_id).select(rtable.type, rtable.site_id, rtable.requester_id, rtable.priority, rtable.date_required, rtable.purpose, rtable.comments,", "False, fields = [(\"\", \"url\")], filterby = dict(field = \"name\",", "output: # Custom Tabs tabs = [(T(\"Basic Details\"), None), (T(\"Contacts\"),", "\"Residential Building\" in types: marker = \"residence\" #elif \"Shelter\" in", "db.org_facility_type ltable = db.org_site_facility_type query = (ltable.site_id == record.site_id) &", "= standard_prep(r) else: result = True if r.interactive: if r.component_name", "Not displayed )), (\"inv\", Storage( name_nice = T(\"Inventory\"), #description =", "HR represents settings.hrm.show_organisation = True # Uncomment to disable Staff", "from s3 import FS s3.filter = FS(\"site_facility_type.facility_type_id$name\") != \"Private Residence\"", "reqs == 2: # Medium marker = \"%s_yellow\" % marker", "(None, \"create\", \"update\"): from s3 import IS_LOCATION_SELECTOR2, S3LocationSelectorWidget2 table =", "of HR Trainings settings.hrm.use_trainings = False # Uncomment to disable", "T(\"Email\") list_fields += [(MOBILE, \"phone.value\"), (EMAIL, \"email.value\"), ] 
# -----------------------------------------------------------------------------
def customise_hrm_job_title_controller(**attr):

    s3 = current.response.s3

    # Custom prep
    standard_prep = s3.prep
    def custom_prep(r):
        # Call standard prep
        if callable(standard_prep):
            result = standard_prep(r)
        else:
            result = True

        if r.interactive or r.representation == "aadata":
            table = current.s3db.hrm_job_title
            table.organisation_id.readable = table.organisation_id.writable = False
            table.type.readable = table.type.writable = False
        return result
    s3.prep = custom_prep

    return attr

settings.customise_hrm_job_title_controller = customise_hrm_job_title_controller
# -----------------------------------------------------------------------------
# Inventory Management
# Uncomment to not track pack values
settings.inv.track_pack_values = False
settings.inv.send_show_org = False
# Types common to both Send and Receive
settings.inv.shipment_types = {
    1: T("Other Warehouse")
}
settings.inv.send_types = {
    #21: T("Distribution")
}
settings.inv.send_type_default = 1
settings.inv.item_status = {
    0: current.messages["NONE"],
    #1: T("Dump"),
    #2: T("Sale"),
    #3: T("Reject"),
    #4: T("Surplus")
}
# Set the label for Facilities in Inventory Management
settings.inv.facility_label = "Facility"
# Uncomment if you need a simpler (but less accountable) process for managing stock levels
#settings.inv.direct_stock_edits = True
# Uncomment to call Stock Adjustments, 'Stock Counts'
settings.inv.stock_count = True
# -----------------------------------------------------------------------------
# Projects
# Uncomment this to use Codes for projects (called 'blurb' in NYC)
settings.project.codes = True
# Uncomment this to use settings suitable for detailed Task management
settings.project.mode_task = False
# Uncomment this to use Activities for projects
settings.project.activities = True

# -----------------------------------------------------------------------------
def customise_project_project_controller(**attr):

    s3 = current.response.s3

    # Custom prep
    standard_prep = s3.prep
    def custom_prep(r):
        # Call standard prep
        if callable(standard_prep):
            result = standard_prep(r)
        else:
            result = True

        if r.interactive:
            table = r.table
            tablename = "project_project"
            table.code.label = T("Project blurb (max. 100 characters)")
            table.code.max_length = 100

            from s3 import S3SQLCustomForm, S3SQLInlineComponent, \
                           S3SQLInlineComponentCheckbox
            crud_form = S3SQLCustomForm(
                "organisation_id",
                "name",
                "code",
                "description",
                "status_id",
                "start_date",
                "end_date",
                S3SQLInlineComponentCheckbox(
                    "activity_type",
                    label = T("Categories"),
                    field = "activity_type_id",
                    cols = 3,
                    # Filter Activity Type by Project
                    filter = {"linktable": "project_activity_type_project",
                              "lkey": "project_id",
                              "rkey": "activity_type_id",
                              },
                    ),
                # Partner Orgs
                S3SQLInlineComponent(
                    "organisation",
                    name = "partner",
                    label = T("Partner Organizations"),
                    fields = ["organisation_id",
                              "comments",
                              ],
                    filterby = dict(field = "role",
                                    options = "2"
                                    ),
                    ),
                S3SQLInlineComponent(
                    "document",
                    name = "media",
                    label = T("Media"),
                    fields = ["document_id",
                              "name",
                              "url",
                              "comments",
                              ],
                    ),
                #"budget",
                #"currency",
                "comments",
                )

            from s3 import S3TextFilter, S3OptionsFilter, S3LocationFilter, S3DateFilter
            filter_widgets = [
                S3TextFilter(["name",
                              "code",
                              "description",
                              "organisation.name",
                              ],
                             label = T("Name"),
                             _class = "filter-search",
                             ),
                S3OptionsFilter("status_id",
                                label = T("Status"),
                                # Not translateable
                                #represent = "%(name)s",
                                ),
                S3LocationFilter("location.location_id",
                                 levels = ("L1", "L2", "L3", "L4"),
                                 hidden = True,
                                 ),
                S3OptionsFilter("organisation_id",
                                hidden = True,
                                ),
                S3DateFilter("start_date",
                             label = T("Start Date"),
                             hide_time = True,
                             #hidden = True,
                             ),
                S3DateFilter("end_date",
                             label = T("End Date"),
                             hide_time = True,
                             #hidden = True,
                             ),
                ]

            list_fields = ["id",
                           "name",
                           "code",
                           "organisation_id",
                           "start_date",
                           "end_date",
                           (T("Locations"), "location.location_id"),
                           ]

            current.s3db.configure(tablename,
                                   crud_form = crud_form,
                                   filter_widgets = filter_widgets,
                                   list_fields = list_fields,
                                   )
        return result
    s3.prep = custom_prep

    return attr

settings.customise_project_project_controller = customise_project_project_controller
# -----------------------------------------------------------------------------
# Requests Management
settings.req.req_type = ["People", "Stock"]
settings.req.item_quantities_writable = True
settings.req.skill_quantities_writable = True
settings.req.items_ask_purpose = False
#settings.req.use_req_number = False
# Label for Requester
settings.req.requester_label = "Site Contact"
# Filter Requester as being from the Site
settings.req.requester_from_site = True
# Label for Inventory Requests
settings.req.type_inv_label = "Supplies"
# Uncomment to enable Summary 'Site Needs' tab for Offices/Facilities
settings.req.summary = True
# -----------------------------------------------------------------------------
def req_req_postprocess(form):
    """
        Runs after crud_form completes
        - creates a cms_post in the newswire
        - @ToDo: Send out Tweets
    """

    req_id = form.vars.id

    db = current.db
    s3db = current.s3db
    rtable = s3db.req_req

    # Read the full record
    row = db(rtable.id == req_id).select(rtable.type,
                                         rtable.site_id,
                                         rtable.requester_id,
                                         rtable.priority,
                                         rtable.date_required,
                                         rtable.purpose,
                                         rtable.comments,
                                         limitby=(0, 1)
                                         ).first()

    # Build Title & Body from the Request details
    priority = rtable.priority.represent(row.priority)
    date_required = row.date_required
    if date_required:
        date = rtable.date_required.represent(date_required)
        title = "%(priority)s by %(date)s" % dict(priority=priority,
                                                  date=date)
    else:
        title = priority
    body = row.comments
    if row.type == 1:
        # Items
        ritable = s3db.req_req_item
        items = db(ritable.req_id == req_id).select(ritable.item_id,
                                                    ritable.item_pack_id,
                                                    ritable.quantity)
        item_represent = s3db.supply_item_represent
        pack_represent = s3db.supply_item_pack_represent
        for item in items:
            item = "%s %s %s" % (item.quantity,
                                 pack_represent(item.item_pack_id),
                                 item_represent(item.item_id))
            body = "%s\n%s" % (item, body)
    else:
        # People
        rstable = s3db.req_req_skill
        skills = db(rstable.req_id == req_id).select(rstable.skill_id,
                                                     rstable.quantity)
        skill_represent = s3db.hrm_multi_skill_represent
        for skill in skills:
            item = "%s %s" % (skill.quantity,
                              skill_represent(skill.skill_id))
            body = "%s\n%s" % (item, body)

    # Lookup series_id
    stable = s3db.cms_series
    try:
        series_id = db(stable.name == "Request").select(stable.id,
                                                        cache=s3db.cache,
                                                        limitby=(0, 1)
                                                        ).first().id
    except:
        # Prepop hasn't been run
        series_id = None

    # Location is that of the Site
    otable = s3db.org_site
    location_id = db(otable.site_id == row.site_id).select(otable.location_id,
                                                           limitby=(0, 1)
                                                           ).first().location_id
    # Create the Post
    ptable = s3db.cms_post
    _id = ptable.insert(series_id=series_id,
                        title=title,
                        body=body,
                        location_id=location_id,
                        person_id=row.requester_id,
                        )
    record = dict(id=_id)
    s3db.update_super(ptable, record)
# L10n settings
settings.L10n.languages = OrderedDict([
    ("en", "English"),
    ("es", "Español"),
])
# Default timezone for users
settings.L10n.utc_offset = "UTC -0500"
# Uncomment these to use US-style dates in English
settings.L10n.date_format = "%m-%d-%Y"
# Start week on Sunday
settings.L10n.firstDOW = 0
# Number formats (defaults to ISO 31-0)
# Decimal separator for numbers (defaults to ,)
settings.L10n.decimal_separator = "."
# Thousands separator for numbers (defaults to space)
settings.L10n.thousands_separator = ","
# Default Country Code for telephone numbers
settings.L10n.default_country_code = 1

# Restrict the Location Selector to just certain countries
# NB This can also be over-ridden for specific contexts later
# e.g. Activities filtered to those of parent Project
settings.gis.countries = ("US",)
# Uncomment to check user locations are within boundaries of their parent
#settings.gis.check_within_parent_boundaries = False

settings.fin.currencies = {
    "USD" : T("United States Dollars"),
}

# PDF to Letter
settings.base.paper_size = T("Letter")

settings.ui.label_mobile_phone = "Cell Phone"
# Enable this to change the label for 'Postcode'
settings.ui.label_postcode = "ZIP Code"
# Uncomment to show created_by/modified_by using Names not Emails
settings.ui.auth_user_represent = "name"
# Uncomment to disable responsive behavior of datatables
# - Disabled until tested
settings.ui.datatables_responsive = False
# Uncomment to have Open links in IFrames open a full page in a new tab
settings.ui.iframe_opens_full = True
settings.ui.label_attachments = "Media"
settings.ui.update_label = "Edit"
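# A quick illustration (not template code) of what the US-style date format
# configured above produces; the datetime import and sample date are
# assumptions for the example only:
#
#   >>> from datetime import date
#   >>> date(2014, 10, 29).strftime("%m-%d-%Y")
#   '10-29-2014'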
# -----------------------------------------------------------------------------
# CMS
# Uncomment to use Bookmarks in Newsfeed
settings.cms.bookmarks = True
# Uncomment to have the Filter form in Newsfeed be open by default
settings.cms.filter_open = True
# Uncomment to adjust filters in Newsfeed when clicking on locations
# instead of opening the profile page
settings.cms.location_click_filters = True
# Uncomment to use organisation_id instead of created_by in Newsfeed
settings.cms.organisation = "post_organisation.organisation_id"
# Uncomment to use org_group_id in Newsfeed
settings.cms.organisation_group = "post_organisation_group.group_id"
# Uncomment to use person_id instead of created_by in Newsfeed
settings.cms.person = "person_id"
# Uncomment to use Rich Text editor in Newsfeed
settings.cms.richtext = True
# Uncomment to show Links in Newsfeed
settings.cms.show_links = True
# Uncomment to show Tags in Newsfeed
settings.cms.show_tags = True
# Uncomment to show post Titles in Newsfeed
settings.cms.show_titles = True
# -----------------------------------------------------------------------------
# Organisations
# Enable the use of Organisation Groups
settings.org.groups = "Network"
# Set the label for Sites
settings.org.site_label = "Facility"
# Uncomment to show the date when a Site (Facilities-only for now) was last contacted
settings.org.site_last_contacted = True
# Enable the use of an Autocomplete for Site lookups
settings.org.site_autocomplete = True
# Extra fields to search in Autocompletes & display in Representations
settings.org.site_autocomplete_fields = ("organisation_id$name",
                                         "location_id$addr_street",
                                         )
# Uncomment to hide inv & req tabs from Sites
#settings.org.site_inv_req_tabs = True
module_type = None, )), (\"supply\", Storage(", "# Use codes for projects (called 'blurb' in NYC) settings.project.codes", "list_fields, ) s3db.pr_group_membership.group_head.label = T(\"Group Chairperson\") if r.component_name == \"group_membership\":", "module_type = None # This item is handled separately for", "\"description\", \"status_id\", \"start_date\", \"end_date\", \"calendar\", #\"drr.hfa\", #\"objectives\", \"human_resource_id\", # Activities", "Status to List Fields list_fields = s3db.get_config(\"org_organisation\", \"list_fields\") list_fields.insert(1, \"group_membership.status_id\")", "Location Selector to just certain countries # NB This can", "args=req_id)) s3db.doc_document.insert(doc_id=record[\"doc_id\"], url=url, ) # ----------------------------------------------------------------------------- def customise_req_req_resource(r, tablename): from", "#\"site_contact\", (T(\"Email\"), \"email.value\"), (settings.get_ui_label_mobile_phone(), \"phone.value\"), ] s3db.configure(\"hrm_human_resource\", crud_form = crud_form,", "{ #0: current.messages[\"NONE\"], #1: T(\"Dump\"), #2: T(\"Sale\"), #3: T(\"Reject\"), #4:", "parsers) s3db.msg_channel_disable(\"msg_rss_channel\", name_exists.channel_id) url_exists.update_record(name=name) if no_import: if url_exists.enabled: # Disable", "== \"aadata\": if not r.component: hr_fields = [\"organisation_id\", \"job_title_id\", \"site_id\",", "# Call standard prep if callable(standard_prep): result = standard_prep(r) else:", "= (mtable.group_id == group_id) & \\ (mtable.group_head == True) &", "= IS_IMAGE() #image_field.widget = None from s3 import S3SQLCustomForm, S3SQLInlineComponent", "s3db = current.s3db s3 = current.response.s3 # Custom prep standard_prep", "= db(mtable.name == \"office\").select(mtable.image, mtable.height, mtable.width, cache=s3db.cache, limitby=(0, 1) ).first()", "settings.req.requester_from_site = True # Label for Inventory Requests settings.req.type_inv_label =", "= \"shelter\" else: # Unknown marker = \"office\" if settings.has_module(\"req\"):", "= \".\" # Thousands separator for numbers (defaults to space)", "3 # Relief Team, to show up in hrm/group field.readable", "which Marker to use for Facilities Map @ToDo: Legend \"\"\"", "= T(\"Phone\"), multiple = False, fields = [(\"\", \"value\")], filterby", "is associated with # another Contact # - update Feed", "= dict(field = \"name\", options=\"iCal\" ) ), S3SQLInlineComponent( \"document\", name", "result = True s3db = current.s3db #if r.method == \"validate\":", "True s3.prep = custom_prep return attr settings.customise_pr_group_controller = customise_pr_group_controller #", "req tabs from Sites #settings.org.site_inv_req_tabs = True # ----------------------------------------------------------------------------- def", "module_type = 3, )), #(\"vol\", Storage( # name_nice = T(\"Volunteers\"),", "Widget or Hierarchical MultiSelectWidget #cols = 5, ), \"phone\", S3SQLInlineComponent(", "# # Can't validate image without the file # image_field", "settings.pr.show_emergency_contacts = False # ----------------------------------------------------------------------------- # Persons def customise_pr_person_controller(**attr): s3", "# registered in order to secure the deployment # Should", "to use for views/layout.html) settings.base.theme = \"NYC\" settings.ui.formstyle_row = \"bootstrap\"", "----------------------------------------------------------------------------- def req_req_postprocess(form): \"\"\" Runs after crud_form completes - creates", "s3.postp def 
# -----------------------------------------------------------------------------
# Inventory Management
# Uncomment to customise the label for Facilities in Inventory Management
settings.inv.facility_label = "Facility"
# Uncomment if you need a simpler (but less accountable) process for managing stock levels
#settings.inv.direct_stock_edits = True
# Uncomment to call Stock Adjustments, 'Stock Counts'
settings.inv.stock_count = True
# Uncomment to not track pack values
settings.inv.track_pack_values = False
settings.inv.send_show_org = False
# Types common to both Send and Receive
settings.inv.shipment_types = {
    1: T("Other Warehouse")
}
settings.inv.send_types = {
    #21: T("Distribution")
}
settings.inv.send_type_default = 1
T(\"Sale\"), #3: T(\"Reject\"), #4: T(\"Surplus\")", "T(\"Supply Chain Management\"), #description = \"Used within Inventory Management, Request", "restricted = True, module_type = 10, )), # Vehicle depends", "restricted = True, module_type = None, )), (\"msg\", Storage( name_nice", "= T(\"Twitter\"), multiple = False, fields = [(\"\", \"value\")], filterby", "settings.project.multiple_organisations = True def customise_project_project_controller(**attr): s3 = current.response.s3 # Custom", "for Facilities Map @ToDo: Legend \"\"\" db = current.db s3db", "all.\"), #_class = \"filter-search\", ), S3OptionsFilter(\"group_team.org_group_id\", label = T(\"Network\"), #hidden", "= (ltable.site_id == record.site_id) & \\ (ltable.facility_type_id == table.id) rows", "and r.method in (\"create\", \"update\"): script = \\ '''$('#req_req_site_id').change(function(){ var", "module_type = 2, # )), (\"cms\", Storage( name_nice = T(\"Content", "\"National Society / Branch\" settings.hrm.organisation_label = \"Organization\" # ----------------------------------------------------------------------------- def", "types: marker = \"residence\" #elif \"Shelter\" in types: # marker", "customise_hrm_job_title_controller(**attr): s3 = current.response.s3 # Custom prep standard_prep = s3.prep", "(defaults to ,) settings.L10n.decimal_separator = \".\" # Thousands separator for", "# Groups def chairperson(row): \"\"\" Virtual Field to show the", "\"name\", \"code\", \"organisation_id\", \"start_date\", \"end_date\", (T(\"Locations\"), \"location.location_id\"), ] s3db.configure(tablename, crud_form", "try: group_id = row.id except: # not available return current.messages[\"NONE\"]", "Call standard postp if callable(standard_postp): output = standard_postp(r, output) if", "Only used in list view so HTML is OK return", "enabled=True) s3db.msg_parser_enable(_id) # Check Now async = current.s3task.async async(\"msg_poll\", args=[\"msg_rss_channel\",", "s3.prep def custom_prep(r): # Call standard prep if callable(standard_prep): result", "drawn up\", # restricted = True, # module_type = 10", "#\"drr.hfa\", #\"objectives\", \"human_resource_id\", # Activities S3SQLInlineComponent( \"location\", label = T(\"Location\"),", "Don't Audit non user-visible resources return False settings.security.audit_write = audit_write", "= \"Allows a Budget to be drawn up\", # restricted", "access the controller module_type = None # This item is", "= current.db otable = s3db.org_organisation org = db(otable.id == r.component_id).select(otable.pe_id,", "\"location\", label = T(\"Neighborhoods Served\"), field = \"location_id\", filterby =", "media, etc.\"), fields = [\"document_id\", \"name\", \"url\", \"comments\", ], filterby", "can also be over-ridden for specific contexts later # e.g.", "\"TWITTER\" ) ), S3SQLInlineComponent( \"contact\", name = \"facebook\", label =", "# )), (\"gis\", Storage( name_nice = T(\"Map\"), #description = \"Situation", "Gets replaced by widget levels = (\"L2\", \"L3\") field.requires =", "r.tablename == \"org_organisation\": if r.id: # Update form ctable =", "T(\"Twitter\"), multiple = False, fields = [(\"\", \"value\")], filterby =", ")), # Uncomment to enable internal support requests #(\"support\", Storage(", "be allowed to register themselves? 
settings.security.self_registration = \"index\" # Do", "prep if callable(standard_prep): result = standard_prep(r) else: result = True", "(\"es\", \"Español\"), ]) # Authentication settings # These settings should", "\"Allows a Budget to be drawn up\", # restricted =", ") ), \"meetings\", \"comments\", postprocess = pr_contact_postprocess, ) s3db.configure(\"org_group\", crud_form", "= T(\"Network\"), #filter = True, #header = \"\", hidden =", "import OrderedDict from gluon import current from gluon.html import A,", "multiple = False, # fields = [(\"\", \"image\")], # filterby", "def req_req_postprocess(form): \"\"\" Runs after crud_form completes - creates a", "= True settings.auth.record_approval_required_for = (\"org_organisation\",) # ----------------------------------------------------------------------------- # Audit def", "# Activities S3SQLInlineComponent( \"location\", label = T(\"Location\"), fields = [(\"\",", "to disable the use of HR Credentials settings.hrm.use_credentials = False", "current.db name = form_vars.name table = s3db.msg_rss_channel name_exists = db(table.name", "# Remember that we don't wish to import rss_import =", "None script = \\ '''$.filterOptionsS3({ 'trigger':'organisation_id', 'target':'site_id', 'lookupResource':'site', 'lookupURL':'/%s/org/sites_for_org/', 'optional':true", "import DIV, INPUT from s3 import S3MultiSelectWidget, S3SQLCustomForm, S3SQLInlineLink, S3SQLInlineComponent,", "skills: item = \"%s %s\" % (skill.quantity, skill_represent(skill.skill_id)) body =", "True, # module_type = 10 # )), #(\"dvr\", Storage( #", "= db(query).select(ctable.poll, limitby=(0, 1) ).first() if rss and not rss.poll:", "= \"Recording and Assigning Assets\", restricted = True, module_type =", "= T(\"Type\"), multiple = False, #widget = \"hierarchy\", ), S3SQLInlineComponentMultiSelectWidget(", "for now) was last contacted settings.org.site_last_contacted = True # Enable", "of \"Teams\" to \"Groups\" settings.hrm.teams = \"Groups\" # Custom label", "False # Uncomment to allow Staff & Volunteers to be", "(\"hrm\", Storage( name_nice = T(\"Contacts\"), #description = \"Human Resources Management\",", ")) crud_form = S3SQLCustomForm( \"name\", \"acronym\", S3SQLInlineLink( \"organisation_type\", field =", "#hidden = True, # ), S3OptionsFilter(\"organisation_organisation_type.organisation_type_id\", label = T(\"Type\"), #hidden", "(\"create\", \"update\"): get_vars = r.get_vars # Context from a Profile", "(mtable.person_id == ptable.id) chair = db(query).select(ptable.first_name, ptable.middle_name, ptable.last_name, ptable.id, limitby=(0,", "=> disabled for all (including Admin) #settings.org.dependent_fields = { \\", "is # registered in order to secure the deployment #", "# Label for Inventory Requests settings.req.type_inv_label = \"Supplies\" # Uncomment", "S3SQLInlineComponent( \"contact\", name = \"twitter\", label = T(\"Twitter\"), multiple =", "table.meetings.writable = True if r.id: # Update form ctable =", "in Newsfeed settings.cms.bookmarks = True # Uncomment to use have", "== pe_id) & \\ (ctable.contact_method == \"RSS\") & \\ (ctable.deleted", "else: # Enable channel (& associated parsers) s3db.msg_channel_enable(\"msg_rss_channel\", url_exists.channel_id) return", "Address \"\"\" form_vars = form.vars name = form_vars.get(\"name\", None) if", "not rss.poll: # Remember that we don't wish to import", "S3AddPersonWidget settings.pr.request_dob = False settings.pr.request_gender = False # Doesn't yet", "= True settings.ui.label_attachments = \"Media\" 
settings.ui.update_label = \"Edit\" # Uncomment", "Uncomment to enable internal support requests #(\"support\", Storage( # name_nice", "= 1 settings.inv.item_status = { #0: current.messages[\"NONE\"], #1: T(\"Dump\"), #2:", "# Requests Management settings.req.req_type = [\"People\", \"Stock\"]#, \"Summary\"] settings.req.prompt_match =", "'''$('#project_project_code').attr('maxlength','100')''' s3.jquery_ready.append(script) crud_form = S3SQLCustomForm( \"organisation_id\", \"name\", \"code\", \"description\", \"status_id\",", "'Lists \"who is doing what & where\". Allows relief agencies", "org = db(otable.id == r.component_id).select(otable.pe_id, limitby=(0, 1) ).first() try: pe_id", "marker = \"%s_green\" % marker mtable = db.gis_marker try: marker", "None crud_form = S3SQLCustomForm( \"name\", \"location_id\", \"mission\", S3SQLInlineComponent( \"contact\", name", "doesn't currently work within an Inline Form #image_field = s3db.pr_image.image", "script = \\ '''$('#req_req_site_id').change(function(){ var url=$('#person_add').attr('href') url=url.split('?') var q=S3.queryString.parse(url[1]) q['(site)']=$(this).val()", "(T(\"Projects\"), \"project\"), (T(\"Assets\"), \"asset\"), ] output[\"rheader\"] = s3db.org_rheader(r, tabs=tabs) return", "# Use ACLs to control access to this module access", "not result: return False if r.method not in (\"read\", \"update\"):", "be approved by an administrator prior to being able to", "\"RSS\" ) ), S3SQLInlineComponent( \"document\", name = \"iCal\", label =", "= table.insert(name=name, enabled=True, url=rss_url) record = dict(id=_id) s3db.update_super(table, record) #", "if r.interactive: tablename = \"org_facility\" table = s3db[tablename] if not", "Change the label of \"Teams\" to \"Groups\" settings.hrm.teams = \"Groups\"", "restricted = True, # The user-visible functionality of this module", "this to request the Organisation when a user registers settings.auth.registration_requests_organisation", "reqs == 1: # Low marker = \"%s_green\" % marker", "T(\"Shelters\"), # #description = \"Tracks the location, capacity and breakdown", "True # Uncomment to disable Staff experience settings.hrm.staff_experience = False", "settings.req.prompt_match = False #settings.req.use_commit = False settings.req.requester_optional = True settings.req.date_writable", "crud_form = S3SQLCustomForm(\"name\", \"description\", S3SQLInlineComponent(\"group_team\", label = T(\"Network\"), fields =", "(gtable.level == \"L2\") manhattan = db(query).select(gtable.id, limitby=(0, 1)).first() if manhattan:", "= 3, # Filter Activity Type by Project filter =", "True, # module_type = 10, # )), # @ToDo: Rewrite", "= data[0][\"value\"][\"value\"] table = s3db.msg_rss_channel old = current.db(table.url == old_rss).select(table.channel_id,", "this to use settings suitable for detailed Task management settings.project.mode_task", "[(\"\", \"url\")], filterby = dict(field = \"name\", options=\"iCal\" ) ),", "agencies to coordinate their activities', restricted = True, module_type =", "return marker # ----------------------------------------------------------------------------- def org_facility_onvalidation(form): \"\"\" Default the name", "= \"name\") ), S3SQLInlineComponentCheckbox( \"activity_type\", label = T(\"Categories\"), field =", "T(\"Project blurb (max. 
100 characters)\") table.code.max_length = 100 table.comments.label =", "record) # Enable channel_id = record[\"channel_id\"] s3db.msg_channel_enable(\"msg_rss_channel\", channel_id) # Setup", "(but less accountable) process for managing stock levels #settings.inv.direct_stock_edits =", "s3db.msg_parser _id = table.insert(channel_id=channel_id, function_name=\"parse_rss\", enabled=True) s3db.msg_parser_enable(_id) # Check Now", "if name_exists: if name_exists.url == rss_url: # No change to", "to the Survey module #(\"building\", Storage( # name_nice = T(\"Building", "still) module_type = 10 )), (\"org\", Storage( name_nice = T(\"Locations\"),", "Projects # Use codes for projects (called 'blurb' in NYC)", "], filterby = dict(field = \"contact_method\", options = \"RSS\" )", "RSS feed is being deleted, so we should disable it", "users need to be approved by an administrator prior to", "== row.site_id).select(otable.location_id, limitby=(0, 1) ).first().location_id # Create Post ptable =", "Add source link url = \"%s%s\" % (settings.get_base_public_url(), URL(c=\"req\", f=\"req\",", "S3SQLInlineComponent( \"contact\", name = \"phone2\", label = T(\"Phone2\"), multiple =", "label = T(\"Network\"), link = False, fields = [(\"\", \"group_id\")],", "name_nice = T(\"Events\"), #description = \"Activate Events (e.g. from Scenario", "(defaults to ISO 31-0) # Decimal separator for numbers (defaults", "Storage( # name_nice = T(\"Building Assessments\"), # #description = \"Building", "Tasks\", restricted = True, module_type = 10 )), (\"assess\", Storage(", "import S3MultiSelectWidget, S3SQLCustomForm, S3SQLInlineLink, S3SQLInlineComponent, S3SQLInlineComponentMultiSelectWidget s3db = current.s3db if", "customise_hrm_human_resource_resource(r, tablename): \"\"\" Customise hrm_human_resource resource (in facility, human_resource, organisation", "in types: marker = \"food\" elif \"Relief Site\" in types:", "(\"read\", \"update\"): types = r.get_vars.get(\"site_facility_type.facility_type_id__belongs\", None) if not types: #", "\"phone\", (T(\"Email\"), \"email.value\"), \"website\" #(T(\"Neighborhoods Served\"), \"location.name\"), ] s3db.configure(\"org_organisation\", crud_form", "address: form_vars.name = address else: # We need a default", "options = \"WORK_PHONE\" ) ), S3SQLInlineComponent( \"contact\", name = \"email\",", "current.s3db s3 = current.response.s3 # Tell the client to request", "s3db.pr_group_membership.group_head.label = T(\"Chairperson\") return True s3.prep = custom_prep return attr", "Administrators can see this module in the default menu (access", "table = s3db.msg_parser _id = table.insert(channel_id=channel_id, function_name=\"parse_rss\", enabled=True) s3db.msg_parser_enable(_id) #", "# Uncomment this to use Milestones in project/task. 
settings.project.milestones =", "True, # module_type = None # This item is handled", "current.s3db s3 = current.response.s3 # Custom prep standard_prep = s3.prep", "settings.auth.registration_link_user_to_default = \"staff\" settings.security.policy = 5 # Controller, Function &", "r.method in (None, \"create\", \"update\"): from s3 import IS_LOCATION_SELECTOR2, S3LocationSelectorWidget2,", "True, module_type = 10, )), (\"survey\", Storage( name_nice = T(\"Surveys\"),", "when a new group has been created create_next = URL(c=\"hrm\",", "s3 import s3_fullname T = current.T settings = current.deployment_settings \"\"\"", "import IS_LOCATION_SELECTOR2, S3LocationSelectorWidget2 table = s3db.org_facility field = table.location_id if", "[(\"\", \"group_id\"), (\"\", \"status_id\"), ], ), S3SQLInlineComponent( \"address\", label =", "Disable channel (& associated parsers) s3db.msg_channel_disable(\"msg_rss_channel\", url_exists.channel_id) return elif url_exists.enabled:", "Marker in preferential order if \"Hub\" in types: marker =", "= False, fields = [(\"\", \"group_id\")], multiple = False, ),", "crud_form, onvalidation = org_facility_onvalidation, ) return True s3.prep = custom_prep", "required. Rather it's main purpose is to be accessed from", "Residence\" if r.interactive: tablename = \"org_facility\" table = s3db[tablename] if", "# Projects # Use codes for projects (called 'blurb' in", "(\"sync\", Storage( name_nice = T(\"Synchronization\"), #description = \"Synchronization\", restricted =", "T(\"Chairperson\") return True s3.prep = custom_prep return attr settings.customise_pr_group_controller =", "s3db.pr_image.image #from gluon.validators import IS_IMAGE #image_field.requires = IS_IMAGE() #image_field.widget =", "tablename, form, record, representation): if not current.auth.user: # Don't include", "created_by in Newsfeed settings.cms.person = \"person_id\" # Uncomment to use", "S3OptionsFilter(\"group_membership.group_id\", label = T(\"Team\"), filter = True, header = \"\",", "----------------------------------------------------------------------------- def customise_org_group_controller(**attr): s3db = current.s3db s3 = current.response.s3 #", ") ), \"comments\", postprocess = pr_contact_postprocess, ) from s3 import", "T(\"Neighborhoods Served\"), field = \"location_id\", filterby = dict(field = \"level\",", "and r.method in (None, \"create\", \"update\"): from s3 import IS_LOCATION_SELECTOR2,", "& org_group controllers) - runs after controller customisation - but", "= dict(child=\"site_id\"), title=T(\"Create Facility\"), tooltip=current.messages.AUTOCOMPLETE_HELP) current.response.s3.req_req_postprocess = req_req_postprocess if not", "do :) return else: # Enable channel (& associated parsers)", "limitby=(0, 1) ).first().location_id # Create Post ptable = s3db.cms_post _id", "%s %s\" % (item.quantity, pack_represent(item.item_pack_id), item_represent(item.item_id)) body = \"%s\\n%s\" %", "(defaults to space) settings.L10n.thousands_separator = \",\" # Default Country Code", "of parent Project settings.gis.countries = (\"US\",) settings.fin.currencies = { \"USD\"", "return True s3.prep = custom_prep return attr settings.customise_pr_group_controller = customise_pr_group_controller", "(T(\"Assets\"), \"asset\"), ] output[\"rheader\"] = s3db.org_rheader(r, tabs=tabs) return output s3.postp", "# - update Feed name url_exists.update_record(name=name) if no_import: if url_exists.enabled:", "this URL url_exists = db(table.url == rss_url).select(table.id, table.channel_id, table.enabled, 
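# How the "Don't Import Feed" checkbox above round-trips (a sketch, not
# template code): the INPUT is injected as a plain comment widget, so it
# never becomes a Field in form.vars; pr_contact_postprocess has to read
# it straight from the raw POST instead:
#
#   no_import = current.request.post_vars.get("rss_no_import", None)
#   # "on" when the box was ticked, None otherwise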
# -----------------------------------------------------------------------------
def pr_contact_postprocess(form):
    """
        Import Organisation/Network RSS Feeds
    """

    s3db = current.s3db
    form_vars = form.vars

    # Read the RSS contact from the inline subform
    rss_url = form_vars.rsscontact_i_value_edit_0 or \
              form_vars.rsscontact_i_value_edit_none
    if not rss_url:
        if form.record:
            # Update form
            import json
            old_rss = form.record.sub_rsscontact
            data = json.loads(old_rss)["data"]
            if data:
                # RSS feed is being deleted, so we should disable it
                old_rss = data[0]["value"]["value"]
                table = s3db.msg_rss_channel
                old = current.db(table.url == old_rss).select(table.channel_id,
                                                              limitby = (0, 1)
                                                              ).first()
                if old:
                    # Disable channel (& associated parsers)
                    s3db.msg_channel_disable("msg_rss_channel", old.channel_id)
        # Nothing else to do
        return

    db = current.db
    name = form_vars.name
    table = s3db.msg_rss_channel
    name_exists = db(table.name == name).select(table.id,
                                                table.channel_id,
                                                table.enabled,
                                                table.url,
                                                limitby = (0, 1)
                                                ).first()
    no_import = current.request.post_vars.get("rss_no_import", None)

    if name_exists:
        if name_exists.url == rss_url:
            # No change to either Name or URL
            if no_import:
                if name_exists.enabled:
                    # Disable channel (& associated parsers)
                    s3db.msg_channel_disable("msg_rss_channel",
                                             name_exists.channel_id)
                return
            elif name_exists.enabled:
                # Nothing to do :)
                return
            else:
                # Enable channel (& associated parsers)
                s3db.msg_channel_enable("msg_rss_channel",
                                        name_exists.channel_id)
                return

        # Check if we have an existing channel with this URL
        url_exists = db(table.url == rss_url).select(table.id,
                                                     table.channel_id,
                                                     table.enabled,
                                                     limitby = (0, 1)
                                                     ).first()
        if url_exists:
            # We have 2 feeds: 1 for the Contact & 1 for the URL
            # Disable the old Contact one and link the URL one to this Contact
            # - update Feed name
            s3db.msg_channel_disable("msg_rss_channel",
                                     name_exists.channel_id)
            url_exists.update_record(name=name)
            if no_import:
                if url_exists.enabled:
                    # Disable channel (& associated parsers)
                    s3db.msg_channel_disable("msg_rss_channel",
                                             url_exists.channel_id)
                return
            elif url_exists.enabled:
                # Nothing to do :)
                return
            else:
                # Enable channel (& associated parsers)
                s3db.msg_channel_enable("msg_rss_channel",
                                        url_exists.channel_id)
                return
        else:
            # Name field is unique so rename old one
            name_exists.update_record(name="%s (Old)" % name)

    else:
        # Check if we have an existing channel with this URL
        url_exists = db(table.url == rss_url).select(table.id,
                                                     table.channel_id,
                                                     table.enabled,
                                                     limitby = (0, 1)
                                                     ).first()
        if url_exists:
            # Feed is associated with another Contact
            # - update Feed name
            url_exists.update_record(name=name)
            if no_import:
                if url_exists.enabled:
                    # Disable channel (& associated parsers)
                    s3db.msg_channel_disable("msg_rss_channel",
                                             url_exists.channel_id)
                return
            elif url_exists.enabled:
                # Nothing to do :)
                return
            else:
                # Enable channel (& associated parsers)
                s3db.msg_channel_enable("msg_rss_channel",
                                        url_exists.channel_id)
                return
        elif no_import:
            # Nothing to do :)
            return
        #else:
        #    # Create a new Feed
        #    pass

    # Add RSS Channel
    _id = table.insert(name=name, enabled=True, url=rss_url)
    record = dict(id=_id)
    s3db.update_super(table, record)
    # Enable
    channel_id = record["channel_id"]
    s3db.msg_channel_enable("msg_rss_channel", channel_id)
    # Setup Parser
    table = s3db.msg_parser
    _id = table.insert(channel_id=channel_id,
                       function_name="parse_rss",
                       enabled=True)
    s3db.msg_parser_enable(_id)
    # Check Now
    async = current.s3task.async
    async("msg_poll", args=["msg_rss_channel", channel_id])
    async("msg_parse", args=[channel_id, "parse_rss"])

# -----------------------------------------------------------------------------
def customise_org_organisation_controller(**attr):

    s3db = current.s3db
    s3 = current.response.s3

    # Custom prep
    standard_prep = s3.prep
    def custom_prep(r):
        # Call standard prep
        if callable(standard_prep):
            result = standard_prep(r)
        else:
            result = True

        if r.interactive:
            if r.component_name == "facility":
                from s3 import IS_LOCATION_SELECTOR2, S3LocationSelectorWidget2
                field = s3db.org_facility.location_id
                levels = ("L2", "L3")
                field.requires = IS_LOCATION_SELECTOR2(levels=levels)
                field.widget = S3LocationSelectorWidget2(levels=levels,
                                                         hide_lx=False,
                                                         reverse_lx=True,
                                                         show_address=True,
                                                         show_postcode=True,
                                                         )
            elif r.component_name == "human_resource":
                # Don't assume that user is from same org/site as Contacts they create
                r.component.table.site_id.default = None

        return result
    s3.prep = custom_prep

    # Custom postp
    standard_postp = s3.postp
    def custom_postp(r, output):
        # Call standard postp
        if callable(standard_postp):
            output = standard_postp(r, output)

        if r.interactive and isinstance(output, dict):
            if "rheader" in output:
                # Custom Tabs
                tabs = [(T("Basic Details"), None),
                        (T("Contacts"), "human_resource"),
                        (T("Facilities"), "facility"),
                        (T("Projects"), "project"),
                        (T("Assets"), "asset"),
                        ]
                output["rheader"] = s3db.org_rheader(r, tabs=tabs)

        return output
    s3.postp = custom_postp

    return attr

settings.customise_org_organisation_controller = customise_org_organisation_controller
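# The branching in pr_contact_postprocess is easier to see as a decision
# table (illustrative summary only; "ticked" means the rss_no_import
# checkbox came through as "on"):
#
#   name match | url match | ticked | action
#   -----------+-----------+--------+-----------------------------------
#   yes        | yes       | yes    | disable channel if enabled
#   yes        | yes       | no     | enable channel if disabled
#   yes        | no        | -      | adopt/rename URL channel, or rename old & create
#   no         | yes       | -      | adopt existing channel, rename it
#   no         | no        | no     | create channel, parser & poll now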
S3DateFilter(\"start_date\", label = T(\"Start Date\"), hide_time = True, #hidden", "marker = \"asset\" elif \"Residential Building\" in types: marker =", "use of HR Skills #settings.hrm.use_skills = False # Uncomment to", "fails to submit) #settings.pr.select_existing = False settings.pr.show_emergency_contacts = False #", "Uncomment to call Stock Adjustments, 'Stock Counts' settings.inv.stock_count = True", "\\ (ctable.deleted == False) rss = db(query).select(ctable.poll, limitby=(0, 1) ).first()", "hr_fields.remove(\"organisation_id\") site_id = get_vars.get(\"(site)\", None) if site_id: field = s3db.hrm_human_resource.site_id", "Newsfeed settings.cms.show_titles = True # ----------------------------------------------------------------------------- # Inventory Management #", "r.component_name == \"pr_group\": list_fields = [#(T(\"Network\"), \"group_team.org_group_id\"), \"name\", \"description\", \"meetings\",", "settings.inv.item_status = { #0: current.messages[\"NONE\"], #1: T(\"Dump\"), #2: T(\"Sale\"), #3:", "), S3OptionsFilter(\"site_id\", hidden = True, ), S3OptionsFilter(\"training.course_id\", label = T(\"Training\"),", "to use Milestones in project/task. settings.project.milestones = False # Uncomment", "OrderedDict([ # Core modules which shouldn't be disabled (\"default\", Storage(", "= \"Contacts\" # Uncomment to allow Staff & Volunteers to", "result = standard_prep(r) else: result = True if not r.component:", "Name or URL if no_import: if name_exists.enabled: # Disable channel", "== 3: # High marker = \"%s_red\" % marker elif", "if the user is automatically approved #settings.auth.always_notify_approver = False #", "output: output[\"form\"].add_class(\"pr_person\") elif \"item\" in output and hasattr(output[\"item\"], \"add_class\"): output[\"item\"].add_class(\"pr_person\")", "rtable = s3db.req_req # Read the full record row =", "Create Post ptable = s3db.cms_post _id = ptable.insert(series_id=series_id, title=title, body=body,", "= { #21: T(\"Distribution\") } settings.inv.send_type_default = 1 settings.inv.item_status =", "in types: marker = \"residence\" #elif \"Shelter\" in types: #", "dict(field = \"contact_method\", options = \"SMS\")), ) s3_sql_custom_fields.insert(3, S3SQLInlineComponent( \"contact\",", "to use have Filter form in Newsfeed be open by", "to submit) #settings.pr.select_existing = False settings.pr.show_emergency_contacts = False # -----------------------------------------------------------------------------", "= T(\"Service\"), # #hidden = True, # ), S3OptionsFilter(\"organisation_organisation_type.organisation_type_id\", label", "project/task. 
# -----------------------------------------------------------------------------
# Groups
def chairperson(row):
    """
        Virtual Field to show the chairperson of a group
    """

    if hasattr(row, "pr_group"):
        row = row.pr_group
    try:
        group_id = row.id
    except:
        # not available
        return current.messages["NONE"]

    db = current.db
    mtable = db.pr_group_membership
    ptable = db.pr_person
    query = (mtable.group_id == group_id) & \
            (mtable.group_head == True) & \
            (mtable.person_id == ptable.id)
    chair = db(query).select(ptable.first_name,
                             ptable.middle_name,
                             ptable.last_name,
                             ptable.id,
                             limitby=(0, 1)).first()
    if chair:
        # Only used in list view so HTML is OK
        return A(s3_fullname(chair),
                 _href=URL(c="hrm", f="person", args=chair.id))
    else:
        return current.messages["NONE"]

# -----------------------------------------------------------------------------
def customise_pr_group_controller(**attr):

    s3 = current.response.s3

    # Custom prep
    standard_prep = s3.prep
    def custom_prep(r):
        # Call standard prep
        if callable(standard_prep):
            result = standard_prep(r)
        else:
            result = True

        if r.component_name == "group_membership":
            current.s3db.pr_group_membership.group_head.label = T("Chairperson")

        return True
    s3.prep = custom_prep

    return attr

settings.customise_pr_group_controller = customise_pr_group_controller

# -----------------------------------------------------------------------------
def customise_pr_group_resource(r, tablename):
    """
        Customise pr_group resource (in group & org_group controllers)
        - runs after controller customisation
        - but runs before prep
    """

    s3db = current.s3db
    table = s3db.pr_group

    field = table.group_type
    field.default = 3 # Relief Team, to show up in hrm/group
    field.readable = field.writable = False

    table.name.label = T("Name")

    from gluon import Field
    table.chairperson = Field.Method("chairperson", chairperson)

    # Format for filter_widgets & imports
    s3db.add_components("pr_group",
                        org_group_team = "group_id",
                        )

    from s3 import S3SQLCustomForm, S3SQLInlineComponent, S3TextFilter, S3OptionsFilter
    crud_form = S3SQLCustomForm("name",
                                "description",
                                S3SQLInlineComponent("group_team",
                                                     label = T("Network"),
                                                     fields = [("", "org_group_id")],
                                                     # Make this optional?
                                                     multiple = False,
                                                     ),
                                "meetings",
                                "comments",
                                )

    filter_widgets = [
        S3TextFilter(["name",
                      "description",
                      "comments",
                      "group_team.org_group_id$name",
                      ],
                     label = T("Search"),
                     comment = T("You can search by group name, description or comments and by network name. You may use % as wildcard. Press 'Search' without input to list all."),
                     #_class = "filter-search",
                     ),
        S3OptionsFilter("group_team.org_group_id",
                        label = T("Network"),
                        #hidden = True,
                        ),
        ]

    list_fields = [#(T("Network"), "group_team.org_group_id"),
                   "name",
                   "description",
                   "meetings",
                   (T("Chairperson"), "chairperson"),
                   "comments",
                   ]

    s3db.configure("pr_group",
                   crud_form = crud_form,
                   filter_widgets = filter_widgets,
                   list_fields = list_fields,
                   # Redirect to member list when a new group has been created
                   create_next = URL(c="hrm", f="group",
                                     args=["[id]", "group_membership"]),
                   )

    s3db.pr_group_membership.group_head.label = T("Group Chairperson")

settings.customise_pr_group_resource = customise_pr_group_resource
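# Field.Method attaches "chairperson" as a computed column: nothing is
# stored, the function runs per-row when the column is referenced (e.g.
# by the list_fields above). An illustrative lookup, with a made-up
# group id; not template code:
#
#   group = current.db.pr_group[3]   # hypothetical record
#   link = group.chairperson()       # invokes chairperson(row) defined above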
# -----------------------------------------------------------------------------
# Persons
# Uncomment to hide fields in S3AddPersonWidget
settings.pr.request_dob = False
settings.pr.request_gender = False
# Doesn't yet work (form fails to submit)
#settings.pr.select_existing = False
settings.pr.show_emergency_contacts = False

# -----------------------------------------------------------------------------
def customise_pr_person_controller(**attr):

    s3 = current.response.s3

    # Custom prep
    standard_prep = s3.prep
    def custom_prep(r):
        # Call standard prep
        if callable(standard_prep):
            result = standard_prep(r)
        else:
            result = True

        s3db = current.s3db
        #if r.method == "validate":
        #    # Can't validate image without the file
        #    image_field = s3db.pr_image.image
        #    image_field.requires = None

        if (r.interactive or r.representation == "aadata") and \
           not r.component:
            hr_fields = ["organisation_id",
                         "job_title_id",
                         "site_id",
                         ]
            if r.method in ("create", "update"):
                get_vars = r.get_vars
                # Context from a Profile page?
                organisation_id = get_vars.get("(organisation)", None)
                if organisation_id:
                    field = s3db.hrm_human_resource.organisation_id
                    field.default = organisation_id
                    field.readable = field.writable = False
                    hr_fields.remove("organisation_id")
                site_id = get_vars.get("(site)", None)
                if site_id:
                    field = s3db.hrm_human_resource.site_id
                    field.default = site_id
                    field.readable = field.writable = False
                    hr_fields.remove("site_id")

            from s3 import S3SQLCustomForm, S3SQLInlineComponent
            MOBILE = settings.get_ui_label_mobile_phone()
            EMAIL = T("Email")
            s3_sql_custom_fields = [
                "first_name",
                #"middle_name",
                "last_name",
                S3SQLInlineComponent("human_resource",
                                     name = "human_resource",
                                     label = "",
                                     multiple = False,
                                     fields = hr_fields,
                                     ),
                # Doesn't currently work within an Inline Form
                #S3SQLInlineComponent("image",
                #                     name = "image",
                #                     label = T("Photo"),
                #                     multiple = False,
                #                     fields = [("", "image")],
                #                     ),
                S3SQLInlineComponent("contact",
                                     name = "phone",
                                     label = MOBILE,
                                     multiple = False,
                                     fields = [("", "value")],
                                     filterby = dict(field = "contact_method",
                                                     options = "SMS")),
                S3SQLInlineComponent("contact",
                                     name = "email",
                                     label = EMAIL,
                                     multiple = False,
                                     fields = [("", "value")],
                                     filterby = dict(field = "contact_method",
                                                     options = "EMAIL")),
                ]

            list_fields = [(current.messages.ORGANISATION, "human_resource.organisation_id"),
                           "first_name",
                           #"middle_name",
                           "last_name",
                           (T("Job Title"), "human_resource.job_title_id"),
                           (T("Office"), "human_resource.site_id"),
                           ]
            if current.auth.is_logged_in():
                MOBILE = settings.get_ui_label_mobile_phone()
                EMAIL = T("Email")
                list_fields += [(MOBILE, "phone.value"),
                                (EMAIL, "email.value"),
                                ]

            crud_form = S3SQLCustomForm(*s3_sql_custom_fields)
            s3db.configure(r.tablename,
                           crud_form = crud_form,
                           list_fields = list_fields,
                           )

        return result
    s3.prep = custom_prep

    # Custom postp
    standard_postp = s3.postp
    def custom_postp(r, output):
        # Call standard postp
        if callable(standard_postp):
            output = standard_postp(r, output)

        if r.interactive and isinstance(output, dict):
            if "form" in output:
                output["form"].add_class("pr_person")
            elif "item" in output and hasattr(output["item"], "add_class"):
                output["item"].add_class("pr_person")

        return output
    s3.postp = custom_postp

    return attr

settings.customise_pr_person_controller = customise_pr_person_controller
# -----------------------------------------------------------------------------
def customise_hrm_human_resource_resource(r, tablename):
    """
        Customise hrm_human_resource resource
        (in facility, human_resource, organisation & person controllers)
        - runs after controller customisation
        - but runs before prep
    """

    s3db = current.s3db
    from s3 import S3SQLCustomForm, S3SQLInlineComponent, S3OptionsFilter

    crud_form = S3SQLCustomForm("person_id",
                                "organisation_id",
                                "site_id",
                                S3SQLInlineComponent("group_person",
                                                     label = T("Network"),
                                                     link = False,
                                                     fields = [("", "group_id")],
                                                     multiple = False,
                                                     ),
                                "job_title_id",
                                "start_date",
                                )

    list_fields = ["id",
                   "person_id",
                   "job_title_id",
                   "organisation_id",
                   "site_id",
                   #"site_contact",
                   (T("Email"), "email.value"),
                   (settings.get_ui_label_mobile_phone(), "phone.value"),
                   ]

    filter_widgets = [
        S3OptionsFilter("group_membership.group_id",
                        label = T("Team"),
                        filter = True,
                        header = "",
                        hidden = True,
                        ),
        S3OptionsFilter("site_id",
                        hidden = True,
                        ),
        S3OptionsFilter("training.course_id",
                        label = T("Training"),
                        hidden = True,
                        ),
        ]

    s3db.configure("hrm_human_resource",
                   crud_form = crud_form,
                   filter_widgets = filter_widgets,
                   list_fields = list_fields,
                   )

    # Site gets populated by the filterOptionsS3 script instead of an AC
    field = r.table.site_id
    field.widget = None
    script = \
'''$.filterOptionsS3({
 'trigger':'organisation_id',
 'target':'site_id',
 'lookupResource':'site',
 'lookupURL':'/%s/org/sites_for_org/',
 'optional':true
})''' % current.request.application
    current.response.s3.jquery_ready.append(script)

    # Person AC with a Create popup
    from s3layouts import S3AddResourceLink
    r.table.person_id.comment = \
        S3AddResourceLink(c="pr", f="person",
                          title=T("Create Person"),
                          tooltip=current.messages.AUTOCOMPLETE_HELP)

settings.customise_hrm_human_resource_resource = customise_hrm_human_resource_resource

# -----------------------------------------------------------------------------
def customise_hrm_job_title_controller(**attr):

    s3 = current.response.s3

    # Custom prep
    standard_prep = s3.prep
    def custom_prep(r):
        # Call standard prep
        if callable(standard_prep):
            result = standard_prep(r)
        else:
            result = True

        if r.interactive or r.representation == "aadata":
            table = current.s3db.hrm_job_title
            table.organisation_id.readable = table.organisation_id.writable = False

        return result
    s3.prep = custom_prep

    return attr

settings.customise_hrm_job_title_controller = customise_hrm_job_title_controller
# -----------------------------------------------------------------------------
def req_req_postprocess(form):
    """
        Runs after crud_form completes
        - creates a cms_post in the newswire
        - @ToDo: Optionally Tweet out the Request
    """

    req_id = form.vars.id

    db = current.db
    s3db = current.s3db
    rtable = s3db.req_req

    # Read the full record
    row = db(rtable.id == req_id).select(rtable.type,
                                         rtable.site_id,
                                         rtable.requester_id,
                                         rtable.priority,
                                         rtable.date_required,
                                         rtable.purpose,
                                         rtable.comments,
                                         limitby=(0, 1)
                                         ).first()

    # Build Title & Body from the Request details
    priority = rtable.priority.represent(row.priority)
    date_required = row.date_required
    if date_required:
        date = rtable.date_required.represent(date_required)
        title = "%s by %s" % (priority, date)
    else:
        title = priority

    body = row.comments
    if row.type == 1:
        # Items
        ritable = s3db.req_req_item
        items = db(ritable.req_id == req_id).select(ritable.item_id,
                                                    ritable.item_pack_id,
                                                    ritable.quantity)
        item_represent = s3db.supply_item_represent
        pack_represent = s3db.supply_item_pack_represent
        for item in items:
            item = "%s %s %s" % (item.quantity,
                                 pack_represent(item.item_pack_id),
                                 item_represent(item.item_id))
            body = "%s\n%s" % (item, body)
    else:
        # Skills
        body = "%s\n%s" % (row.purpose, body)
        rstable = s3db.req_req_skill
        skills = db(rstable.req_id == req_id).select(rstable.skill_id,
                                                     rstable.quantity)
        skill_represent = s3db.hrm_multi_skill_represent
        for skill in skills:
            item = "%s %s" % (skill.quantity, skill_represent(skill.skill_id))
            body = "%s\n%s" % (item, body)

    # Lookup series_id
    stable = s3db.cms_series
    try:
        series_id = db(stable.name == "Request").select(stable.id,
                                                        cache=s3db.cache,
                                                        limitby=(0, 1)
                                                        ).first().id
    except:
        # Prepop hasn't been run
        series_id = None

    # Location is that of the Site
    otable = s3db.org_site
    location_id = db(otable.site_id == row.site_id).select(otable.location_id,
                                                           limitby=(0, 1)
                                                           ).first().location_id
    # Create Post
    ptable = s3db.cms_post
    _id = ptable.insert(series_id=series_id,
                        title=title,
                        body=body,
                        location_id=location_id,
                        person_id=row.requester_id,
                        )
    record = dict(id=_id)
    s3db.update_super(ptable, record)

    # Add source link
    url = "%s%s" % (settings.get_base_public_url(),
                    URL(c="req", f="req", args=req_id))
    s3db.doc_document.insert(doc_id=record["doc_id"],
                             url=url,
                             )
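# The prepend-loop above builds the newswire body in reverse order, e.g.
# for a supplies request (all values below are made-up sample data):
#
#   comments: "Deliver to loading dock"
#   items:    2 boxes Water, then 10 each Blankets
#
# yields:
#
#   10 each Blankets
#   2 boxes Water
#   Deliver to loading dock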
# -----------------------------------------------------------------------------
# CMS
# Uncomment to use Bookmarks in Newsfeed
settings.cms.bookmarks = True
# Uncomment to adjust filters in Newsfeed when clicking on locations instead of opening the profile page
settings.cms.location_click_filters = True
# Uncomment to use organisation_id instead of created_by in Newsfeed
settings.cms.organisation = "post_organisation.organisation_id"
# Uncomment to use org_group_id in Newsfeed
settings.cms.organisation_group = "post_organisation_group.group_id"
# Uncomment to use person_id instead of created_by in Newsfeed
settings.cms.person = "person_id"
# Uncomment to use Rich Text editor in Newsfeed
settings.cms.richtext = True
# Uncomment to show Links in Newsfeed
settings.cms.show_links = True
# Uncomment to show Tags in Newsfeed
settings.cms.show_tags = True
# Uncomment to show post Titles in Newsfeed
settings.cms.show_titles = True
# -----------------------------------------------------------------------------
# Inventory Management
# Uncomment to customise the label for Facilities in Inventory Management
settings.inv.facility_label = "Facility"
# Uncomment if you need a simpler (but less accountable) process for managing stock levels
#settings.inv.direct_stock_edits = True
settings.inv.send_show_org = False
# Types common to both Send and Receive
settings.inv.shipment_types = {
    1: T("Other Warehouse")
}
settings.inv.send_types = {
    #21: T("Distribution")
}
settings.inv.send_type_default = 1
settings.inv.item_status = {
    #0: current.messages["NONE"],
    #1: T("Dump"),
}
T(\"Location\"), levels = (\"L1\", \"L2\", \"L3\", \"L4\"),", "q=S3.queryString.parse(url[1]) q['(site)']=$(this).val() url=url[0]+'?'+S3.queryString.stringify(q) $('#person_add').attr('href',url)})''' current.response.s3.jquery_ready.append(script) settings.customise_req_req_resource = customise_req_req_resource # -----------------------------------------------------------------------------", "#settings.pr.select_existing = False settings.pr.show_emergency_contacts = False # ----------------------------------------------------------------------------- # Persons", "def customise_org_organisation_controller(**attr): s3db = current.s3db s3 = current.response.s3 # Custom", "gluon.storage import Storage from s3 import s3_fullname T = current.T", "= \"asset\" elif \"Residential Building\" in types: marker = \"residence\"", "= T(\"Description\") table.meetings.readable = table.meetings.writable = True # Increase size", "not current.auth.user: # Don't include prepop return False if tablename", ") table.organisation_id.widget = S3MultiSelectWidget(multiple=False) if r.get_vars.get(\"format\", None) == \"popup\": #", "standard_prep(r) else: result = True if r.interactive or r.representation ==", "(\"cms\", Storage( name_nice = T(\"Content Management\"), #description = \"Content Management", "customise_project_project_controller(**attr): s3 = current.response.s3 # Custom prep standard_prep = s3.prep", "), ] list_fields = [(current.messages.ORGANISATION, \"human_resource.organisation_id\"), \"first_name\", #\"middle_name\", \"last_name\", (T(\"Job", "standard prep if callable(standard_prep): result = standard_prep(r) if not result:", "body) rstable = s3db.req_req_skill skills = db(rstable.req_id == req_id).select(rstable.skill_id, rstable.quantity)", "= False, module_type = None # No Menu )), (\"sync\",", "\"L3\", \"L4\"), hidden = True, ), S3OptionsFilter(\"site_id\", hidden = True,", "= False settings.req.requester_optional = True settings.req.date_writable = False settings.req.item_quantities_writable =", "from gluon.html import A, URL from gluon.storage import Storage from", "#(T(\"Don't Import Feed\"), \"poll\"), ], filterby = dict(field = \"contact_method\",", "# Uncomment to hide fields in S3AddPersonWidget settings.pr.request_dob = False", "from collections import OrderedDict except: # Python 2.6 from gluon.contrib.simplejson.ordered_dict", "try: series_id = db(stable.name == \"Request\").select(stable.id, cache=s3db.cache, limitby=(0, 1) ).first().id", "\"update\"): from s3 import IS_LOCATION_SELECTOR2, S3LocationSelectorWidget2 table = s3db.org_facility field", "available return current.messages[\"NONE\"] db = current.db mtable = current.s3db.pr_group_membership ptable", "1) ).first() if rss and not rss.poll: # Remember that", "= \"%(priority)s by %(date)s\" % dict(priority=priority, date=date) else: title =", "of the site otable = s3db.org_site location_id = db(otable.site_id ==", "# Pre-Populate settings.base.prepopulate = (\"NYC\",) settings.base.system_name = T(\"NYC Prepared\") settings.base.system_name_short", "\"contact_method\", options = \"WORK_PHONE\" ) ), S3SQLInlineComponent( \"contact\", name =", "is handled separately for the menu )), # Uncomment to", "Storage( name_nice = T(\"Contacts\"), #description = \"Human Resources Management\", restricted", "T(\"Members\"), # #description = \"Membership Management System\", # restricted =", "= True def customise_project_project_controller(**attr): s3 = current.response.s3 # Custom prep", "= \"\" # Gets replaced by widget levels = 
(\"L2\",", "of victims in Shelters\", # restricted = True, # module_type", "form.record.sub_rsscontact import json data = old_rss = json.loads(old_rss)[\"data\"] if data:", "is being deleted, so we should disable it old_rss =", "name_nice = T(\"Surveys\"), #description = \"Create, enter, and manage surveys.\",", "use Milestones in project/task. settings.project.milestones = False # Uncomment this", "row = db(rtable.id == req_id).select(rtable.type, rtable.site_id, rtable.requester_id, rtable.priority, rtable.date_required, rtable.purpose,", "# Uncomment to enable the use of HR Education settings.hrm.use_education", "multiple = False, fields = [(\"\", \"value\"), #(T(\"Don't Import Feed\"),", "# Persons # Uncomment to hide fields in S3AddPersonWidget settings.pr.request_dob", "End in 1! S3DateFilter(\"start_date\", label = T(\"Start Date\"), hide_time =", ") ), S3SQLInlineComponent( \"document\", name = \"media\", label = T(\"URLs", "\"pr_person_details.company\" : [], # \"pr_person_details.affiliations\" : [], # \"vol_volunteer.active\" :", "= old_rss = json.loads(old_rss)[\"data\"] if data: # RSS feed is", "victims in Shelters\", # restricted = True, # module_type =", "#description = \"Allow affected individuals & households to register to", "Groups def chairperson(row): \"\"\" Virtual Field to show the chairperson", "Disable channel (& associated parsers) s3db.msg_channel_disable(\"msg_rss_channel\", name_exists.channel_id) url_exists.update_record(name=name) if no_import:", "not r.component and r.method in (None, \"create\", \"update\"): from s3", "Enable this to change the label for 'Mobile Phone' settings.ui.label_mobile_phone", ")), (\"org\", Storage( name_nice = T(\"Locations\"), #description = 'Lists \"who", "attr settings.customise_org_facility_controller = customise_org_facility_controller # ----------------------------------------------------------------------------- def customise_org_organisation_resource(r, tablename): from", "= \"email\", label = T(\"Email\"), multiple = False, fields =", "This can also be over-ridden for specific contexts later #", "False # GeoNames username settings.gis.geonames_username = \"eden_nyc\" # Uncomment to", "= current.deployment_settings \"\"\" Template settings for NYC Prepared \"\"\" #", "def customise_hrm_human_resource_controller(**attr): s3 = current.response.s3 # Custom prep standard_prep =", "within Inventory Management, Request Management and Asset Management\", restricted =", "list all.\"), #_class = \"filter-search\", ), S3OptionsFilter(\"group_team.org_group_id\", label = T(\"Network\"),", "\"organisation.acronym\", ], label = T(\"Name\"), _class = \"filter-search\", ), S3OptionsFilter(\"status_id\",", "= T(\"Email\"), multiple = False, fields = [(\"\", \"value\")], filterby", "types: marker = \"food\" elif \"Relief Site\" in types: marker", "of this module isn't normally required. Rather it's main purpose", "Filter Activity Type by Project filter = {\"linktable\": \"project_activity_type_project\", \"lkey\":", "timezone for users settings.L10n.utc_offset = \"UTC -0500\" # Uncomment these", "name = \"phone\", label = MOBILE, multiple = False, fields", "rtable.comments, limitby=(0, 1) ).first() # Build Title & Body from", "\"contact\", name = \"email\", label = EMAIL, multiple = False,", "what & where\". 
Allows relief agencies to coordinate their activities',", "this module access = None, # All Users (inc Anonymous)", "= (\"L2\",), points = True, polygons = True, ) #", "= \"Cell Phone\" # Enable this to change the label", "= True # Uncomment this to request the Site when", "= T(\"Neighborhoods Served\"), field = \"location_id\", filterby = dict(field =", "f=\"group_membership_status\", label=str(T(\"Add New Status\")), parent=\"group_membership\", child=\"status_id\" )) crud_form = S3SQLCustomForm(", "Viewer\"), #description = \"Needed for Breadcrumbs\", restricted = False, module_type", "= (0, 1) ).first() if url_exists: # Either Contact has", "requested.\", restricted = True, module_type = 1, )), (\"project\", Storage(", "\"meetings\", \"comments\", postprocess = pr_contact_postprocess, ) s3db.configure(\"org_group\", crud_form = crud_form,", "T(\"Documents\"), #description = \"A library of digital resources, such as", "parsers) s3db.msg_channel_enable(\"msg_rss_channel\", url_exists.channel_id) return elif no_import: # Nothing to do", "T(\"Phone\"), multiple = False, fields = [(\"\", \"value\")], filterby =", "Activities S3SQLInlineComponent( \"location\", label = T(\"Location\"), fields = [(\"\", \"location_id\")],", "translateable #represent = \"%(name)s\", cols = 3, ), #S3OptionsFilter(\"theme_project.theme_id\", #", "Budget to be drawn up\", # restricted = True, #", "points = True, polygons = True, ) # Default location", "Storage( name_nice = T(\"Requests\"), #description = \"Manage requests for supplies,", "= False, fields = [(\"\", \"value\"), #(T(\"Don't Import Feed\"), \"poll\"),", "\"Synchronization\", restricted = True, access = \"|1|\", # Only Administrators", "# Partner Orgs S3SQLInlineComponent( \"organisation\", name = \"partner\", label =", "disable/enable them settings.modules = OrderedDict([ # Core modules which shouldn't", "customise_org_organisation_controller(**attr): s3db = current.s3db s3 = current.response.s3 # Custom prep", ").first() if old and old.enabled: s3db.msg_channel_disable(\"msg_rss_channel\", old.channel_id) return else: #", "Storage from s3 import s3_fullname T = current.T settings =", "\"facility_type_id\")], multiple = False, required = True, ), \"name\", \"location_id\",", "#(T(\"Neighborhoods Served\"), \"location.name\"), ] s3db.configure(\"org_organisation\", crud_form = crud_form, filter_widgets =", "accountable) process for managing stock levels #settings.inv.direct_stock_edits = True #", "Organisation Groups settings.org.groups = \"Network\" # Make Services Hierarchical settings.org.services_hierarchical", "S3Represent, S3TextFilter, S3OptionsFilter, S3SQLCustomForm, S3SQLInlineComponent s3db = current.s3db s3db.org_group_team.org_group_id.represent =", "# s3db.pr_group_membership.group_head.label = T(\"Chairperson\") return True s3.prep = custom_prep return", "current.db s3db = current.s3db table = db.org_facility_type ltable = db.org_site_facility_type", "create_next = URL(c=\"hrm\", f=\"group\", args=[\"[id]\", \"group_membership\"]), ) settings.customise_pr_group_resource = customise_pr_group_resource", "\"\"\" req_id = form.vars.id db = current.db s3db = current.s3db", "# )), #(\"dvr\", Storage( # name_nice = T(\"Disaster Victim Registry\"),", ") settings.customise_hrm_human_resource_resource = customise_hrm_human_resource_resource # ----------------------------------------------------------------------------- def customise_hrm_job_title_controller(**attr): s3 =", "= False, # module_type = 10, # )), #(\"member\", Storage(", "url_exists = 
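# -----------------------------------------------------------------------------
# Illustrative sketch (not part of the template): facility_marker_fn() above
# picks the first matching facility type in a fixed preference order and then
# appends a colour suffix for open requests. The same selection can be written
# data-driven; _pick_marker is a hypothetical, framework-free analogue and is
# not called anywhere by Eden.
_MARKER_PREFERENCE = (("Hub", "warehouse"),
                      ("Medical Clinic", "hospital"),
                      ("Food", "food"),
                      ("Relief Site", "asset"),
                      ("Residential Building", "residence"),
                      )
_REQS_SUFFIX = {3: "_red",    # High
                2: "_yellow", # Medium
                1: "_green",  # Low
                }

def _pick_marker(types, reqs=None):
    """ Return the marker name for a list of facility type names """
    for type_name, marker in _MARKER_PREFERENCE:
        if type_name in types:
            break
    else:
        # Unknown
        marker = "office"
    return marker + _REQS_SUFFIX.get(reqs, "")

# Example: a Hub with a high-priority open request gets the red warehouse marker
assert _pick_marker(["Hub", "Food"], reqs=3) == "warehouse_red"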
# -----------------------------------------------------------------------------
def pr_contact_postprocess(form):
    """
        Import Organisation/Network RSS Feeds
    """
    s3db = current.s3db
    form_vars = form.vars
    rss_url = form_vars.rsscontact_i_value_edit_0 or \
              form_vars.rsscontact_i_value_edit_none
    if not rss_url:
        if form.record:
            # Update form
            old_rss = form.record.sub_rsscontact
            import json
            data = old_rss = json.loads(old_rss)["data"]
            if data:
                # RSS feed is being deleted, so we should disable it
                old_rss = data[0]["value"]["value"]
                table = s3db.msg_rss_channel
                old = current.db(table.url == old_rss).select(table.channel_id,
                                                              table.enabled,
                                                              limitby = (0, 1)
                                                              ).first()
                if old and old.enabled:
                    s3db.msg_channel_disable("msg_rss_channel", old.channel_id)
                return
        else:
            # Nothing to do :)
            return

    # Check if we already have a channel for this Contact
    db = current.db
    name = form_vars.name
    table = s3db.msg_rss_channel
    name_exists = db(table.name == name).select(table.id,
                                                table.channel_id,
                                                table.enabled,
                                                table.url,
                                                limitby = (0, 1)
                                                ).first()
    no_import = current.request.post_vars.get("rss_no_import", None)
    if name_exists:
        if name_exists.url == rss_url:
            # No change to either Contact Name or URL
            if no_import:
                if name_exists.enabled:
                    # Disable channel (& associated parsers)
                    s3db.msg_channel_disable("msg_rss_channel", name_exists.channel_id)
                return
            elif name_exists.enabled:
                # Nothing to do :)
                return
            else:
                # Enable channel (& associated parsers)
                s3db.msg_channel_enable("msg_rss_channel", name_exists.channel_id)
                return
        else:
            # We have 2 feeds: 1 for the Contact & 1 for the URL
            url_exists = db(table.url == rss_url).select(table.id,
                                                         table.channel_id,
                                                         table.enabled,
                                                         limitby = (0, 1)
                                                         ).first()
            if url_exists:
                # Either Contact has changed Name or this feed is associated with
                # another Contact
                # - free up the old name
                name_exists.update_record(name="%s (Old)" % name)
                if name_exists.enabled:
                    # Disable channel (& associated parsers)
                    s3db.msg_channel_disable("msg_rss_channel", name_exists.channel_id)
                # - update Feed name
                url_exists.update_record(name=name)
                if no_import:
                    if url_exists.enabled:
                        # Disable channel (& associated parsers)
                        s3db.msg_channel_disable("msg_rss_channel", url_exists.channel_id)
                    return
                elif url_exists.enabled:
                    # Nothing to do :)
                    return
                else:
                    # Enable channel (& associated parsers)
                    s3db.msg_channel_enable("msg_rss_channel", url_exists.channel_id)
                    return
            else:
                # Update the URL
                name_exists.update_record(url=rss_url)
                if no_import:
                    if name_exists.enabled:
                        # Disable channel (& associated parsers)
                        s3db.msg_channel_disable("msg_rss_channel", name_exists.channel_id)
                    return
                elif name_exists.enabled:
                    # Nothing to do :)
                    return
                else:
                    # Enable channel (& associated parsers)
                    s3db.msg_channel_enable("msg_rss_channel", name_exists.channel_id)
                    return
    else:
        # Check if we have a channel for the URL
        url_exists = db(table.url == rss_url).select(table.id,
                                                     table.channel_id,
                                                     table.enabled,
                                                     limitby = (0, 1)
                                                     ).first()
        if url_exists:
            # Link the URL one to this Contact
            # and ensure active or not as appropriate
            url_exists.update_record(name=name)
            if no_import:
                if url_exists.enabled:
                    # Disable channel (& associated parsers)
                    s3db.msg_channel_disable("msg_rss_channel", url_exists.channel_id)
                return
            elif url_exists.enabled:
                # Nothing to do :)
                return
            else:
                # Enable channel (& associated parsers)
                s3db.msg_channel_enable("msg_rss_channel", url_exists.channel_id)
                return
        elif no_import:
            # Nothing to do :)
            return
        #else:
        #    pass
    # Add RSS Channel
    _id = table.insert(name=name, enabled=True, url=rss_url)
    record = dict(id=_id)
    s3db.update_super(table, record)
    # Enable
    channel_id = record["channel_id"]
    s3db.msg_channel_enable("msg_rss_channel", channel_id)
    # Setup Parser
    table = s3db.msg_parser
    _id = table.insert(channel_id=channel_id,
                       function_name="parse_rss",
                       enabled=True,
                       )
    s3db.msg_parser_enable(_id)
    # Check Now
    async = current.s3task.async
    async("msg_poll", args=["msg_rss_channel", channel_id])
    async("msg_parse", args=[channel_id, "parse_rss"])
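# -----------------------------------------------------------------------------
# Illustrative sketch (not part of the template): the branching in
# pr_contact_postprocess() above reduces to a small decision table over
# (channel exists?, enabled?, "don't import" checked?). This hypothetical,
# framework-free helper mirrors that logic so it can be reasoned about (or
# unit-tested) outside of web2py; nothing here is called by Eden.
def _rss_channel_action(exists, enabled, no_import):
    """
        Return the action the postprocess would take:
        "create", "enable", "disable" or None (nothing to do)
    """
    if not exists:
        # No channel yet: only create one if imports are wanted
        return None if no_import else "create"
    if no_import:
        # Channel exists but imports are unwanted: disable if still enabled
        return "disable" if enabled else None
    # Channel exists and imports are wanted: enable if currently disabled
    return None if enabled else "enable"

# Example: an existing but disabled channel with import wanted is re-enabled
assert _rss_channel_action(exists=True, enabled=False, no_import=False) == "enable"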
# -----------------------------------------------------------------------------
def customise_org_organisation_resource(r, tablename):
    from gluon.html import DIV, INPUT
    from s3 import S3MultiSelectWidget, S3SQLCustomForm, S3SQLInlineLink, S3SQLInlineComponent, S3SQLInlineComponentMultiSelectWidget
    s3db = current.s3db
    if r.interactive or r.representation == "aadata":
        if not r.component:
            # Default the rss_no_import checkbox from any existing RSS contact
            if r.tablename == "org_organisation":
                if r.id:
                    # Update form
                    ctable = s3db.pr_contact
                    query = (ctable.pe_id == r.record.pe_id) & \
                            (ctable.contact_method == "RSS") & \
                            (ctable.deleted == False)
                    rss = current.db(query).select(ctable.poll,
                                                   limitby=(0, 1)
                                                   ).first()
                    if rss and not rss.poll:
                        # Remember that we don't wish to import
                        rss_import = "on"
                    else:
                        # Default
                        rss_import = None
                else:
                    # Create form: Default
                    rss_import = None
            else:
                # Component (e.g. on an Org Group tab)
                if r.component_id:
                    # Update form
                    db = current.db
                    otable = s3db.org_organisation
                    org = db(otable.id == r.component_id).select(otable.pe_id,
                                                                 limitby=(0, 1)
                                                                 ).first()
                    try:
                        pe_id = org.pe_id
                    except:
                        current.log.error("Org %s not found: cannot set rss_import correctly" % r.component_id)
                        # Default
                        rss_import = None
                    else:
                        ctable = s3db.pr_contact
                        query = (ctable.pe_id == pe_id) & \
                                (ctable.contact_method == "RSS") & \
                                (ctable.deleted == False)
                        rss = db(query).select(ctable.poll,
                                               limitby=(0, 1)
                                               ).first()
                        if rss and not rss.poll:
                            # Remember that we don't wish to import
                            rss_import = "on"
                        else:
                            # Default
                            rss_import = None
                else:
                    # Create form: Default
                    rss_import = None

            mtable = s3db.org_group_membership
            mtable.group_id.widget = S3MultiSelectWidget(multiple=False)
            mtable.status_id.widget = S3MultiSelectWidget(multiple=False,
                                                          create=dict(c="org",
                                                                      f="group_membership_status",
                                                                      label=str(T("Add New Status")),
                                                                      parent="group_membership",
                                                                      child="status_id"
                                                                      ))
            crud_form = S3SQLCustomForm(
                "name",
                "acronym",
                S3SQLInlineLink(
                    "organisation_type",
                    field = "organisation_type_id",
                    label = T("Type"),
                    multiple = False,
                    #widget = "hierarchy",
                ),
                S3SQLInlineComponentMultiSelectWidget(
                # activate hierarchical org_service:
                #S3SQLInlineLink(
                    "service",
                    label = T("Services"),
                    field = "service_id",
                    # activate hierarchical org_service:
                    #leafonly = False,
                    #widget = "hierarchy",
                ),
                S3SQLInlineComponent(
                    "group_membership",
                    label = T("Network"),
                    fields = [("", "group_id"),
                              ("", "status_id"),
                              ],
                ),
                S3SQLInlineComponent(
                    "address",
                    label = T("Address"),
                    multiple = False,
                    # This is just Text - put into the Comments box for now
                    # Ultimately should go into location_id$addr_street
                    fields = [("", "comments")],
                ),
                S3SQLInlineComponentMultiSelectWidget(
                    "location",
                    label = T("Neighborhoods Served"),
                    field = "location_id",
                    filterby = dict(field = "level",
                                    options = "L4"
                                    ),
                    # @ToDo: GroupedCheckbox Widget or Hierarchical MultiSelectWidget
                    #cols = 5,
                ),
                "phone",
                S3SQLInlineComponent(
                    "contact",
                    name = "phone2",
                    label = T("Phone2"),
                    multiple = False,
                    fields = [("", "value")],
                    filterby = dict(field = "contact_method",
                                    options = "WORK_PHONE"
                                    )
                ),
                S3SQLInlineComponent(
                    "contact",
                    name = "email",
                    label = T("Email"),
                    multiple = False,
                    fields = [("", "value")],
                    filterby = dict(field = "contact_method",
                                    options = "EMAIL"
                                    )
                ),
                "website",
                S3SQLInlineComponent(
                    "contact",
                    comment = DIV(INPUT(_type="checkbox",
                                        _name="rss_no_import",
                                        value = rss_import,
                                        ),
                                  T("Don't Import Feed")),
                    name = "rss",
                    label = T("RSS"),
                    multiple = False,
                    fields = [("", "value"),
                              #(T("Don't Import Feed"), "poll"),
                              ],
                    filterby = dict(field = "contact_method",
                                    options = "RSS"
                                    )
                ),
                S3SQLInlineComponent(
                    "document",
                    name = "iCal",
                    label = "iCAL",
                    multiple = False,
                    fields = [("", "url")],
                    filterby = dict(field = "name",
                                    options="iCal"
                                    )
                ),
                S3SQLInlineComponent(
                    "document",
                    name = "data",
                    label = T("Data"),
                    multiple = False,
                    fields = [("", "url")],
                    filterby = dict(field = "name",
                                    options="Data"
                                    )
                ),
                S3SQLInlineComponent(
                    "contact",
                    name = "twitter",
                    label = T("Twitter"),
                    multiple = False,
                    fields = [("", "value")],
                    filterby = dict(field = "contact_method",
                                    options = "TWITTER"
                                    )
                ),
                S3SQLInlineComponent(
                    "contact",
                    name = "facebook",
                    label = T("Facebook"),
                    multiple = False,
                    fields = [("", "value")],
                    filterby = dict(field = "contact_method",
                                    options = "FACEBOOK"
                                    )
                ),
                "comments",
                postprocess = pr_contact_postprocess,
            )
            from s3 import S3LocationFilter, S3OptionsFilter, S3TextFilter
            # activate hierarchical org_service:
            #from s3 import S3HierarchyFilter
            filter_widgets = [
                S3TextFilter(["name", "acronym"],
                             label = T("Name"),
                             _class = "filter-search",
                             ),
                S3OptionsFilter("group_membership.group_id",
                                label = T("Network"),
                                represent = "%(name)s",
                                #hidden = True,
                                ),
                S3LocationFilter("organisation_location.location_id",
                                 label = T("Neighborhood"),
                                 levels = ("L3", "L4"),
                                 #hidden = True,
                                 ),
                S3OptionsFilter("service_organisation.service_id",
                                #label = T("Service"),
                                #hidden = True,
                                ),
                # activate hierarchical org_service:
                #S3HierarchyFilter("service_organisation.service_id",
                #                  #label = T("Service"),
                #                  #hidden = True,
                #                  ),
                S3OptionsFilter("organisation_organisation_type.organisation_type_id",
                                label = T("Type"),
                                #hidden = True,
                                ),
            ]
            list_fields = ["name",
                           (T("Type"), "organisation_organisation_type.organisation_type_id"),
                           (T("Services"), "service.name"),
                           "phone",
                           (T("Email"), "email.value"),
                           "website",
                           #(T("Neighborhoods Served"), "location.name"),
                           ]
            s3db.configure("org_organisation",
                           crud_form = crud_form,
                           filter_widgets = filter_widgets,
                           list_fields = list_fields,
                           )

settings.customise_org_organisation_resource = customise_org_organisation_resource
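# -----------------------------------------------------------------------------
# Illustrative sketch (not part of the template): each S3SQLInlineComponent
# above carves one subset out of the shared pr_contact table by filtering on
# contact_method (WORK_PHONE, EMAIL, RSS, TWITTER, FACEBOOK). A hypothetical,
# framework-free analogue of that filterby routing, not called by Eden:
def _route_contacts(contacts):
    """ Group contact dicts by their contact_method """
    routed = {}
    for contact in contacts:
        routed.setdefault(contact["contact_method"], []).append(contact["value"])
    return routed

# Example: one record per inline component
assert _route_contacts([{"contact_method": "EMAIL", "value": "info@example.org"},
                        {"contact_method": "RSS", "value": "http://example.org/feed"},
                        ])["RSS"] == ["http://example.org/feed"]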
# -----------------------------------------------------------------------------
def customise_org_organisation_controller(**attr):
    s3db = current.s3db
    s3 = current.response.s3
    # Custom prep
    standard_prep = s3.prep
    def custom_prep(r):
        # Call standard prep
        if callable(standard_prep):
            result = standard_prep(r)
        else:
            result = True
        if r.interactive:
            if r.component_name == "facility":
                if r.method in (None, "create", "update"):
                    from s3 import IS_LOCATION_SELECTOR2, S3LocationSelectorWidget2
                    table = s3db.org_facility
                    field = table.location_id
                    if r.method in ("create", "update"):
                        field.label = "" # Gets replaced by widget
                    levels = ("L2", "L3")
                    field.requires = IS_LOCATION_SELECTOR2(levels=levels)
                    field.widget = S3LocationSelectorWidget2(levels=levels,
                                                             hide_lx=False,
                                                             reverse_lx=True,
                                                             show_address=True,
                                                             show_postcode=True,
                                                             )
            elif r.component_name == "human_resource":
                # Don't assume that user is from same org/site as Contacts they create
                s3db.hrm_human_resource.site_id.default = None
        return result
    s3.prep = custom_prep

    # Custom postp
    standard_postp = s3.postp
    def custom_postp(r, output):
        # Call standard postp
        if callable(standard_postp):
            output = standard_postp(r, output)
        if r.interactive and isinstance(output, dict):
            if "rheader" in output:
                # Custom Tabs
                tabs = [(T("Basic Details"), None),
                        (T("Contacts"), "human_resource"),
                        (T("Facilities"), "facility"),
                        (T("Projects"), "project"),
                        (T("Assets"), "asset"),
                        ]
                output["rheader"] = s3db.org_rheader(r, tabs=tabs)
        return output
    s3.postp = custom_postp
    return attr

settings.customise_org_organisation_controller = customise_org_organisation_controller

# -----------------------------------------------------------------------------
def customise_org_group_controller(**attr):
    s3db = current.s3db
    s3 = current.response.s3
    # Custom prep
    standard_prep = s3.prep
    def custom_prep(r):
        # Call standard prep
        if callable(standard_prep):
            result = standard_prep(r)
        else:
            result = True
        if not r.component:
            table = s3db.org_group
            list_fields = ["name",
                           "mission",
                           "website",
                           "meetings",
                           ]
            if r.method != "read":
                from gluon.validators import IS_EMPTY_OR
                from gluon.html import DIV, INPUT
                from s3 import IS_LOCATION_SELECTOR2, S3LocationSelectorWidget2, S3SQLCustomForm, S3SQLInlineComponent
                field = table.location_id
                field.label = "" # Gets replaced by widget
                #field.requires = IS_LOCATION_SELECTOR2(levels = ("L2",))
                field.requires = IS_EMPTY_OR(
                                    IS_LOCATION_SELECTOR2(levels = ("L2",))
                                    )
                field.widget = S3LocationSelectorWidget2(levels = ("L2",),
                                                         points = True,
                                                         polygons = True,
                                                         )
                # Default location to Manhattan
                db = current.db
                gtable = db.gis_location
                query = (gtable.name == "New York") & \
                        (gtable.level == "L2")
                manhattan = db(query).select(gtable.id,
                                             limitby=(0, 1)
                                             ).first()
                if manhattan:
                    field.default = manhattan.id
                table.mission.readable = table.mission.writable = True
                table.meetings.readable = table.meetings.writable = True
                if r.id:
                    # Update form
                    ctable = s3db.pr_contact
                    query = (ctable.pe_id == r.record.pe_id) & \
                            (ctable.contact_method == "RSS") & \
                            (ctable.deleted == False)
                    rss = db(query).select(ctable.poll,
                                           limitby=(0, 1)
                                           ).first()
                    if rss and not rss.poll:
                        # Remember that we don't wish to import
                        rss_import = "on"
                    else:
                        # Default
                        rss_import = None
                else:
                    # Create form: Default
                    rss_import = None
                crud_form = S3SQLCustomForm(
                    "name",
                    "location_id",
                    "mission",
                    S3SQLInlineComponent(
                        "contact",
                        name = "phone",
                        label = T("Phone"),
                        multiple = False,
                        fields = [("", "value")],
                        filterby = dict(field = "contact_method",
                                        options = "WORK_PHONE"
                                        )
                    ),
                    S3SQLInlineComponent(
                        "contact",
                        name = "email",
                        label = T("Email"),
                        multiple = False,
                        fields = [("", "value")],
                        filterby = dict(field = "contact_method",
                                        options = "EMAIL"
                                        )
                    ),
                    "website",
                    S3SQLInlineComponent(
                        "contact",
                        comment = DIV(INPUT(_type="checkbox",
                                            _name="rss_no_import",
                                            value = rss_import,
                                            ),
                                      T("Don't Import Feed")),
                        name = "rss",
                        label = T("RSS"),
                        multiple = False,
                        fields = [("", "value")],
                        filterby = dict(field = "contact_method",
                                        options = "RSS"
                                        )
                    ),
                    S3SQLInlineComponent(
                        "document",
                        name = "iCal",
                        label = "iCAL",
                        multiple = False,
                        fields = [("", "url")],
                        filterby = dict(field = "name",
                                        options="iCal"
                                        )
                    ),
                    S3SQLInlineComponent(
                        "document",
                        name = "data",
                        label = T("Data"),
                        multiple = False,
                        fields = [("", "url")],
                        filterby = dict(field = "name",
                                        options="Data"
                                        )
                    ),
                    "meetings",
                    "comments",
                    postprocess = pr_contact_postprocess,
                )
                s3db.configure("org_group",
                               crud_form = crud_form,
                               list_fields = list_fields,
                               )
        return result
    s3.prep = custom_prep

    if current.auth.s3_logged_in():
        # Allow components with components (such as org/group) to breakout from tabs
        attr["native"] = True
    return attr

settings.customise_org_group_controller = customise_org_group_controller
# -----------------------------------------------------------------------------
# Persons
# Uncomment to hide fields in S3AddPersonWidget
settings.pr.request_dob = False
settings.pr.request_gender = False
# Doesn't yet work (form fails to submit)
#settings.pr.select_existing = False
settings.pr.show_emergency_contacts = False

# -----------------------------------------------------------------------------
def customise_pr_person_controller(**attr):
    s3db = current.s3db
    s3 = current.response.s3
    # Custom prep
    standard_prep = s3.prep
    def custom_prep(r):
        # Call standard prep
        if callable(standard_prep):
            result = standard_prep(r)
            if not result:
                return False

        #if r.method == "validate":
        #    # Can't validate image without the file
        #    image_field = s3db.pr_image.image
        #    image_field.requires = None

        if r.interactive or r.representation == "aadata":
            if not r.component:
                hr_fields = ["organisation_id",
                             "job_title_id",
                             "site_id",
                             ]
                if r.method in ("create", "update"):
                    # Context from a Profile page?
                    organisation_id = r.get_vars.get("(organisation)", None)
                    if organisation_id:
                        field = s3db.hrm_human_resource.organisation_id
                        field.default = organisation_id
                        field.readable = field.writable = False
                        hr_fields.remove("organisation_id")
                    site_id = r.get_vars.get("(site)", None)
                    if site_id:
                        field = s3db.hrm_human_resource.site_id
                        field.default = site_id
                        field.readable = field.writable = False
                        hr_fields.remove("site_id")

                from s3 import S3SQLCustomForm, S3SQLInlineComponent
                # Images don't currently work within an Inline Form
                #image_field = s3db.pr_image.image
                #from gluon.validators import IS_IMAGE
                #image_field.requires = IS_IMAGE()
                #image_field.widget = None
                s3_sql_custom_fields = [
                    "first_name",
                    #"middle_name",
                    "last_name",
                    S3SQLInlineComponent(
                        "human_resource",
                        name = "human_resource",
                        label = "",
                        multiple = False,
                        fields = hr_fields,
                    ),
                    #S3SQLInlineComponent(
                    #    "image",
                    #    name = "image",
                    #    label = T("Photo"),
                    #    multiple = False,
                    #    fields = [("", "image")],
                    #    filterby = dict(field = "profile",
                    #                    options=[True]
                    #                    )
                    #),
                    ]
                list_fields = [(current.messages.ORGANISATION, "human_resource.organisation_id"),
                               "first_name",
                               #"middle_name",
                               "last_name",
                               (T("Job Title"), "human_resource.job_title_id"),
                               (T("Office"), "human_resource.site_id"),
                               ]
                # Don't include Email/Phone for unauthenticated users
                if current.auth.is_logged_in():
                    MOBILE = settings.get_ui_label_mobile_phone()
                    EMAIL = T("Email")
                    list_fields += [(MOBILE, "phone.value"),
                                    (EMAIL, "email.value"),
                                    ]
                    s3_sql_custom_fields.insert(3,
                                                S3SQLInlineComponent(
                                                    "contact",
                                                    name = "phone",
                                                    label = MOBILE,
                                                    multiple = False,
                                                    fields = [("", "value")],
                                                    filterby = dict(field = "contact_method",
                                                                    options = "SMS"),
                                                ),
                                                )
                    s3_sql_custom_fields.insert(3,
                                                S3SQLInlineComponent(
                                                    "contact",
                                                    name = "email",
                                                    label = EMAIL,
                                                    multiple = False,
                                                    fields = [("", "value")],
                                                    filterby = dict(field = "contact_method",
                                                                    options = "EMAIL"),
                                                ),
                                                )
                crud_form = S3SQLCustomForm(*s3_sql_custom_fields)
                s3db.configure(r.tablename,
                               crud_form = crud_form,
                               list_fields = list_fields,
                               )
            elif r.component_name == "group_membership":
                s3db.pr_group_membership.group_head.label = T("Group Chairperson")
            #else:
            #    # RHeader wants a simplified version, but don't want inconsistent across tabs
            #    s3db.pr_group_membership.group_head.label = T("Chairperson")
        return True
    s3.prep = custom_prep

    # Custom postp
    standard_postp = s3.postp
    def custom_postp(r, output):
        # Call standard postp
        if callable(standard_postp):
            output = standard_postp(r, output)
        if r.interactive and isinstance(output, dict):
            if "form" in output:
                output["form"].add_class("pr_person")
            elif "item" in output and hasattr(output["item"], "add_class"):
                output["item"].add_class("pr_person")
        return output
    s3.postp = custom_postp
    return attr

settings.customise_pr_person_controller = customise_pr_person_controller
# -----------------------------------------------------------------------------
# Groups
def chairperson(row):
    """
        Virtual Field to show the chairperson of a group
    """
    if hasattr(row, "pr_group"):
        row = row.pr_group
    try:
        group_id = row.id
    except:
        # not available
        return current.messages["NONE"]
    db = current.db
    mtable = current.s3db.pr_group_membership
    ptable = db.pr_person
    query = (mtable.group_id == group_id) & \
            (mtable.group_head == True) & \
            (mtable.person_id == ptable.id)
    chair = db(query).select(ptable.first_name,
                             ptable.middle_name,
                             ptable.last_name,
                             ptable.id,
                             limitby=(0, 1)).first()
    if chair:
        # Only used in list view so HTML is OK
        return A(s3_fullname(chair),
                 _href=URL(c="hrm", f="person", args=chair.id))
    else:
        return current.messages["NONE"]

# -----------------------------------------------------------------------------
def customise_pr_group_controller(**attr):
    s3db = current.s3db
    s3 = current.response.s3
    # Custom prep
    standard_prep = s3.prep
    def custom_prep(r):
        # Call standard prep
        if callable(standard_prep):
            result = standard_prep(r)
        else:
            result = True
        if r.component_name == "group_membership":
            from s3layouts import S3AddResourceLink
            s3db.pr_group_membership.person_id.comment = \
                S3AddResourceLink(c="pr", f="person",
                                  title=T("Create Person"))
        return result
    s3.prep = custom_prep
    return attr

settings.customise_pr_group_controller = customise_pr_group_controller

# -----------------------------------------------------------------------------
def customise_pr_group_resource(r, tablename):
    """
        Customise pr_group resource (in group & org_group controllers)
        - runs after controller customisation
        - but runs before prep
    """
    s3db = current.s3db
    table = s3db.pr_group
    field = table.group_type
    field.default = 3 # Relief Team, to show up in hrm/group
    field.readable = field.writable = False
    table.name.label = T("Team Name")
    table.description.label = T("Description")
    table.meetings.readable = table.meetings.writable = True
    # Increase size of widget
    from s3 import s3_comments_widget
    table.description.widget = s3_comments_widget
    from gluon import Field
    table.chairperson = Field.Method("chairperson", chairperson)
    # Format for filter_widgets & imports
    s3db.add_components("pr_group",
                        org_group_team = "group_id",
                        )
    s3db.configure("pr_group",
                   # Redirect to member list when a new group has been created
                   create_next = URL(c="hrm", f="group",
                                     args=["[id]", "group_membership"]),
                   )
    from s3 import S3Represent, S3TextFilter, S3OptionsFilter, S3SQLCustomForm, S3SQLInlineComponent
    s3db.org_group_team.org_group_id.represent = S3Represent(lookup="org_group")
    crud_form = S3SQLCustomForm("name",
                                "description",
                                S3SQLInlineComponent("group_team",
                                                     label = T("Network"),
                                                     fields = [("", "org_group_id")],
                                                     # @ToDo: Make this optional?
                                                     multiple = False,
                                                     ),
                                "meetings",
                                "comments",
                                )
    filter_widgets = [
        S3TextFilter(["name",
                      "description",
                      "comments",
                      "group_team.org_group_id$name",
                      ],
                     label = T("Search"),
                     comment = T("You can search by group name, description or comments and by network name. You may use % as wildcard. Press 'Search' without input to list all."),
                     #_class = "filter-search",
                     ),
        S3OptionsFilter("group_team.org_group_id",
                        label = T("Network"),
                        #hidden = True,
                        ),
    ]
    # NB These get over_written by hrm_group_controller()
    list_fields = [(T("Network"), "group_team.org_group_id"),
                   "name",
                   "description",
                   "meetings",
                   (T("Chairperson"), "chairperson"),
                   "comments",
                   ]
    s3db.configure("pr_group",
                   crud_form = crud_form,
                   filter_widgets = filter_widgets,
                   list_fields = list_fields,
                   )

settings.customise_pr_group_resource = customise_pr_group_resource
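# -----------------------------------------------------------------------------
# Illustrative sketch (not part of the template): chairperson() above is
# attached with Field.Method, i.e. evaluated lazily per row, and it must
# degrade to a plain "none" value when the row carries no usable id. This
# hypothetical, framework-free stand-in shows the same contract without web2py:
def _virtual_field(row, compute, default="-"):
    """ Evaluate a per-row virtual field with a safe fallback """
    try:
        key = row["id"]
    except (KeyError, TypeError):
        # Row not available
        return default
    return compute(key)

# Example: a lookup dict standing in for the group_head query
_chairs = {42: "Jane Doe"}
assert _virtual_field({"id": 42}, lambda gid: _chairs.get(gid, "-")) == "Jane Doe"
assert _virtual_field({}, lambda gid: _chairs.get(gid, "-")) == "-"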
# -----------------------------------------------------------------------------
# Human Resource Management
# Uncomment to change the label for 'Staff'
settings.hrm.staff_label = "Contacts"
# Uncomment to allow Staff & Volunteers to be registered without an Organisation
settings.hrm.org_required = False
# Uncomment to show the Organisation name in HR represents
settings.hrm.show_organisation = True
# Uncomment to disable Staff experience
settings.hrm.staff_experience = False
# Uncomment to disable the use of HR Certificates
settings.hrm.use_certificates = False
# Uncomment to disable the use of HR Credentials
settings.hrm.use_credentials = False
# Uncomment to disable the use of HR Description
settings.hrm.use_description = False
# Uncomment to disable the use of HR Education
settings.hrm.use_education = False
# Uncomment to disable the use of HR Skills
#settings.hrm.use_skills = False
# Uncomment to disable the use of HR Trainings
settings.hrm.use_trainings = False
# Uncomment to change the label for Organisations in HR module
#settings.hrm.organisation_label = "National Society / Branch"
settings.hrm.organisation_label = "Organization"
"----------------------------------------------------------------------------- # Human Resource Management # Uncomment to chage the", "# Label for Requester settings.req.requester_label = \"Site Contact\" # Filter", "such as photos, documents and reports\", restricted = True, module_type", "], filterby = dict(field = \"role\", options = \"2\" )", "% name) if name_exists.enabled: # Disable channel (& associated parsers)", "\"ZIP Code\" # Uncomment to disable responsive behavior of datatables", "name, description or comments and by network name. You may", "result = standard_prep(r) if not result: return False if r.method", "= False, # This is just Text - put into", "= False # Uncomment this to disable Sectors in projects", "Hide most Fields from s3 import S3SQLCustomForm, S3SQLInlineComponent # We", "True # Roles that newly-registered users get automatically #settings.auth.registration_roles =", "comments and by network name. You may use % as", "associated parsers) s3db.msg_channel_enable(\"msg_rss_channel\", url_exists.channel_id) return else: # Update the URL", "fields settings.org.site_autocomplete = True # Extra fields to search in", "if form.record: # Update form old_rss = form.record.sub_rsscontact import json", "parsers) s3db.msg_channel_disable(\"msg_rss_channel\", url_exists.channel_id) return elif url_exists.enabled: # Nothing to do", "enable internal support requests #(\"support\", Storage( # name_nice = T(\"Support\"),", "customise_hrm_human_resource_controller # ----------------------------------------------------------------------------- def customise_hrm_human_resource_resource(r, tablename): \"\"\" Customise hrm_human_resource resource", "Title\"), \"human_resource.job_title_id\"), (T(\"Office\"), \"human_resource.site_id\"), ] # Don't include Email/Phone for", "Feed name url_exists.update_record(name=name) if no_import: if url_exists.enabled: # Disable channel", "= True if r.interactive or r.representation == \"aadata\": if not", "#hidden = True, ), S3DateFilter(\"end_date\", label = T(\"End Date\"), hide_time", "s3db.hrm_human_resource.organisation_id field.default = organisation_id field.readable = field.writable = False hr_fields.remove(\"organisation_id\")", "[(\"\", \"image\")], # filterby = dict(field = \"profile\", # options=[True]", "settings.base.prepopulate = (\"NYC\",) settings.base.system_name = T(\"NYC Prepared\") settings.base.system_name_short = T(\"NYC", "if \"Hub\" in types: marker = \"warehouse\" elif \"Medical Clinic\"", "Prepared \"\"\" # Pre-Populate settings.base.prepopulate = (\"NYC\",) settings.base.system_name = T(\"NYC", "field.requires = IS_EMPTY_OR( IS_LOCATION_SELECTOR2(levels = (\"L2\",)) ) field.widget = S3LocationSelectorWidget2(levels", "ptable.id) chair = db(query).select(ptable.first_name, ptable.middle_name, ptable.last_name, ptable.id, limitby=(0, 1)).first() if", "False, fields = [(\"\", \"group_id\")], multiple = False, ), \"job_title_id\",", "\"Content Management System\", restricted = True, module_type = 10, )),", ": [], # \"pr_person_details.father_name\" : [], # \"pr_person_details.company\" : [],", "marker elif reqs == 1: # Low marker = \"%s_green\"", "[\"id\", \"person_id\", \"job_title_id\", \"organisation_id\", (T(\"Network\"), \"group_person.group_id\"), (T(\"Groups\"), \"person_id$group_membership.group_id\"), \"site_id\", #\"site_contact\",", "= table.mission.writable = True table.meetings.readable = table.meetings.writable = True if", "options = \"TWITTER\" ) ), S3SQLInlineComponent( \"contact\", name = \"facebook\",", "to 
be approved by an administrator prior to being able", "PreP standard_prep = s3.prep def custom_prep(r): # Call standard prep", "True, ), # @ToDo: Widget to handle Start & End", "# RSS feed is being deleted, so we should disable", "5, )), #(\"cr\", Storage( # name_nice = T(\"Shelters\"), # #description", "\"New York\") & \\ (gtable.level == \"L2\") manhattan = db(query).select(gtable.id,", "1 # Enable this to change the label for 'Mobile", "This item is handled separately for the menu # )),", "= db(ritable.req_id == req_id).select(ritable.item_id, ritable.item_pack_id, ritable.quantity) item_represent = s3db.supply_item_represent pack_represent", "import S3SQLCustomForm, S3SQLInlineComponent crud_form = S3SQLCustomForm(\"person_id\", \"organisation_id\", \"site_id\", S3SQLInlineComponent( \"group_person\",", "not available return current.messages[\"NONE\"] db = current.db mtable = current.s3db.pr_group_membership", "mtable.width, cache=s3db.cache, limitby=(0, 1) ).first() except: marker = db(mtable.name ==", "allocation of appropriate Resources (Human, Assets & Facilities).\", restricted =", "return result s3.prep = custom_prep if current.auth.s3_logged_in(): # Allow components", "= 1 # Enable this to change the label for", "s3.postp = custom_postp return attr settings.customise_org_organisation_controller = customise_org_organisation_controller # -----------------------------------------------------------------------------", "been run series_id = None # Location is that of", "#settings.hrm.organisation_label = \"National Society / Branch\" settings.hrm.organisation_label = \"Organization\" #", "T(\"Location\"), levels = (\"L1\", \"L2\", \"L3\", \"L4\"), hidden = True,", "types: # Hide Private Residences from s3 import FS s3.filter", "if not r.component and r.method in (None, \"create\", \"update\"): from", "Storage( name_nice = T(\"Surveys\"), #description = \"Create, enter, and manage", "record row = db(rtable.id == req_id).select(rtable.type, rtable.site_id, rtable.requester_id, rtable.priority, rtable.date_required,", "5, ), \"phone\", S3SQLInlineComponent( \"contact\", name = \"phone2\", label =", "= db(query).select(ptable.first_name, ptable.middle_name, ptable.last_name, ptable.id, limitby=(0, 1)).first() if chair: #", "Uncomment to show Links in Newsfeed settings.cms.show_links = True #", "} settings.inv.send_type_default = 1 settings.inv.item_status = { #0: current.messages[\"NONE\"], #1:", "website, social media, etc.\"), fields = [\"document_id\", \"name\", \"url\", \"comments\",", "label = T(\"Start Date\"), hide_time = True, #hidden = True,", "marker).select(mtable.image, mtable.height, mtable.width, cache=s3db.cache, limitby=(0, 1) ).first() except: marker =", "this to change the label for 'Postcode' settings.ui.label_postcode = \"ZIP", "= [(\"\", \"value\"), #(T(\"Don't Import Feed\"), \"poll\"), ], filterby =", "hide_time = True, #hidden = True, ), S3DateFilter(\"end_date\", label =", "common to both Send and Receive settings.inv.shipment_types = { 1:", "in a modern style #(\"budget\", Storage( # name_nice = T(\"Budgeting", "# Custom postp standard_postp = s3.postp def custom_postp(r, output): #", "settings suitable for detailed Task management settings.project.mode_task = False #", "across tabs # s3db.pr_group_membership.group_head.label = T(\"Chairperson\") return True s3.prep =", "show_postcode=True, ) table.organisation_id.widget = S3MultiSelectWidget(multiple=False) if r.get_vars.get(\"format\", None) == \"popup\":", "S3OptionsFilter(\"organisation_id\", filter = True, 
header = \"\", hidden = True,", "\"comments\", ], filterby = dict(field = \"name\") ), S3SQLInlineComponentCheckbox( \"activity_type\",", "filter_widgets = [ S3TextFilter([\"name\", \"code\", \"description\", \"organisation.name\", \"organisation.acronym\", ], label", "= dict(field = \"level\", options = \"L4\" ), # @ToDo:", "Unknown marker = \"office\" if settings.has_module(\"req\"): # Colour code by", "#S3OptionsFilter(\"theme_project.theme_id\", # label = T(\"Theme\"), # #hidden = True, #", "Disabled until tested settings.ui.datatables_responsive = False # PDF to Letter", "= rss_import, ), T(\"Don't Import Feed\")), name = \"rss\", label", "= filter_widgets, ) field = r.table.site_id # Don't assume that", "new users need to verify their email address? settings.auth.registration_requires_verification =", "= True # Uncomment to use organisation_id instead of created_by", "= \"Edit\" # Uncomment to disable checking that LatLons are", "channel_id = record[\"channel_id\"] s3db.msg_channel_enable(\"msg_rss_channel\", channel_id) # Setup Parser table =", "'lookupURL':'/%s/org/sites_for_org/', 'optional':true })''' % r.application s3.jquery_ready.append(script) return result s3.prep =", "# module_type = 10, # )), #(\"member\", Storage( # name_nice", "update Feed name url_exists.update_record(name=name) if no_import: if url_exists.enabled: # Disable", "# #label = T(\"Service\"), # #hidden = True, # ),", "table.organisation_id.readable = table.organisation_id.writable = False table.type.readable = table.type.writable = False", "= { 0: [\"comms_dispatch\"]} #settings.auth.registration_link_user_to = {\"staff\":T(\"Staff\"), # #\"volunteer\":T(\"Volunteer\") #", "= db(query).select(table.name) types = [row.name for row in rows] #", "= True, ), S3DateFilter(\"end_date\", label = T(\"End Date\"), hide_time =", "q['(site)']=$(this).val() url=url[0]+'?'+S3.queryString.stringify(q) $('#person_add').attr('href',url)})''' current.response.s3.jquery_ready.append(script) settings.customise_req_req_resource = customise_req_req_resource # ----------------------------------------------------------------------------- #", "an Inline Form #image_field = s3db.pr_image.image #from gluon.validators import IS_IMAGE", "these to use US-style dates in English settings.L10n.date_format = \"%m-%d-%Y\"", "in Newsfeed settings.cms.organisation_group = \"post_organisation_group.group_id\" # Uncomment to use person_id", "\"Facility\" #settings.org.site_label = \"Location\" # Uncomment to show the date", "\"value\")], filterby = dict(field = \"contact_method\", options = \"FACEBOOK\" )", "= \"Site Administration\", restricted = True, module_type = None #", "----------------------------------------------------------------------------- # Persons def customise_pr_person_controller(**attr): s3 = current.response.s3 # Custom", "Credentials settings.hrm.use_credentials = False # Uncomment to enable the use", "\"Medical Clinic\" in types: marker = \"hospital\" elif \"Food\" in", "associated parsers) s3db.msg_channel_disable(\"msg_rss_channel\", name_exists.channel_id) return elif name_exists.enabled: # Nothing to", "to be accessed from other modules. 
module_type = None, )),", "s3 import S3SQLCustomForm, S3SQLInlineComponent, S3SQLInlineComponentCheckbox s3db = current.s3db table =", "False # PDF to Letter settings.base.paper_size = T(\"Letter\") # Restrict", "modules which shouldn't be disabled (\"default\", Storage( name_nice = T(\"Home\"),", "feeds: 1 for the Contact & 1 for the URL", "= filter_widgets, list_fields = list_fields, ) settings.customise_org_organisation_resource = customise_org_organisation_resource #", "order if \"Hub\" in types: marker = \"warehouse\" elif \"Medical", "T(\"End Date\"), hide_time = True, #hidden = True, ), ]", "----------------------------------------------------------------------------- # Persons # Uncomment to hide fields in S3AddPersonWidget", "T(\"Map\"), #description = \"Situation Awareness & Geospatial Analysis\", restricted =", "----------------------------------------------------------------------------- def customise_req_req_resource(r, tablename): from s3layouts import S3AddResourceLink current.s3db.req_req.site_id.comment =", "= True # ----------------------------------------------------------------------------- # Inventory Management # Uncomment to", "parent=\"group_membership\", child=\"status_id\" )) crud_form = S3SQLCustomForm( \"name\", \"acronym\", S3SQLInlineLink( \"organisation_type\",", "Relief Team, to show up in hrm/group field.readable = field.writable", "False # Types common to both Send and Receive settings.inv.shipment_types", "or comments and by network name. You may use %", "simplified version, but don't want inconsistent across tabs # s3db.pr_group_membership.group_head.label", "current.s3db.req_req.site_id.comment = \\ S3AddResourceLink(c=\"org\", f=\"facility\", vars = dict(child=\"site_id\"), title=T(\"Create Facility\"),", "(\"organisation_id$name\", \"location_id$addr_street\", ) # Uncomment to hide inv & req", "should be changed _after_ the 1st (admin) user is #", "= custom_postp return attr settings.customise_org_organisation_controller = customise_org_organisation_controller # ----------------------------------------------------------------------------- def", "and by network name. 
You may use % as wildcard.", "form: Default rss_import = None crud_form = S3SQLCustomForm( \"name\", \"location_id\",", "\"organisation_id\", \"name\", \"code\", \"description\", \"status_id\", \"start_date\", \"end_date\", \"calendar\", #\"drr.hfa\", #\"objectives\",", "True # Uncomment this to request the Organisation when a", "= current.s3task.async async(\"msg_poll\", args=[\"msg_rss_channel\", channel_id]) async(\"msg_parse\", args=[channel_id, \"parse_rss\"]) # -----------------------------------------------------------------------------", "return else: # Enable channel (& associated parsers) s3db.msg_channel_enable(\"msg_rss_channel\", name_exists.channel_id)", "== marker).select(mtable.image, mtable.height, mtable.width, cache=s3db.cache, limitby=(0, 1) ).first() except: marker", "record = dict(id=_id) s3db.update_super(ptable, record) # Add source link url", "\"hospital\" elif \"Food\" in types: marker = \"food\" elif \"Relief", "label = T(\"Services\"), field = \"service_id\", # activate hierarchical org_service:", "# Default timezone for users settings.L10n.utc_offset = \"UTC -0500\" #", "] s3db.configure(\"pr_group\", crud_form = crud_form, filter_widgets = filter_widgets, list_fields =", "# pass # Add RSS Channel _id = table.insert(name=name, enabled=True,", "= False, # Use ACLs to control access to this", "# # Enable the use of Organisation Groups settings.org.groups =", "S3SQLInlineComponent crud_form = S3SQLCustomForm(\"person_id\", \"organisation_id\", \"site_id\", S3SQLInlineComponent( \"group_person\", label =", "= \"%s_green\" % marker mtable = db.gis_marker try: marker =", "\"aadata\": table = current.s3db.hrm_job_title table.organisation_id.readable = table.organisation_id.writable = False table.type.readable", "name_exists.url == rss_url: # No change to either Contact Name", "replaced by widget #field.requires = IS_LOCATION_SELECTOR2(levels = (\"L2\",)) field.requires =", "customise_pr_group_controller # ----------------------------------------------------------------------------- def customise_pr_group_resource(r, tablename): \"\"\" Customise pr_group resource", "= customise_hrm_human_resource_resource # ----------------------------------------------------------------------------- def customise_hrm_job_title_controller(**attr): s3 = current.response.s3 #", "] list_fields = [\"id\", \"name\", \"code\", \"organisation_id\", \"start_date\", \"end_date\", (T(\"Locations\"),", "# Gets replaced by widget #field.requires = IS_LOCATION_SELECTOR2(levels = (\"L2\",))", "components (such as org/group) to breakout from tabs attr[\"native\"] =", "name = \"twitter\", label = T(\"Twitter\"), multiple = False, fields", "need a default form_vars.name = current.db.org_facility.location_id.represent(form_vars.location_id) # ----------------------------------------------------------------------------- def customise_org_facility_controller(**attr):", "# Record Approval settings.auth.record_approval = True settings.auth.record_approval_required_for = (\"org_organisation\",) #", "to either Contact Name or URL if no_import: if name_exists.enabled:", "\"activity_type_id\", }, ), #\"budget\", #\"currency\", \"comments\", ) from s3 import", "be over-ridden for specific contexts later # e.g. 
# -----------------------------------------------------------------------------
# Organisations
#
# Enable the use of Organisation Groups
settings.org.groups = "Network"
# Make Services Hierarchical
settings.org.services_hierarchical = True
# Set the label for Sites
settings.org.site_label = "Facility"
#settings.org.site_label = "Location"
# Uncomment to show the date when a Site (Facilities-only for now) was last contacted
settings.org.site_last_contacted = True
# Uncomment to hide inv & req tabs from Sites
#settings.org.site_inv_req_tabs = True
# Dependent fields, hidden by default for all (including Admin)
#settings.org.dependent_fields = { \
#    "pr_person_details.mother_name"                 : [],
#    "pr_person_details.father_name"                 : [],
#    "vol_volunteer.active"                          : [],
#    "vol_volunteer_cluster.vol_cluster_type_id"     : [],
#    "vol_volunteer_cluster.vol_cluster_id"          : [],
#    "vol_volunteer_cluster.vol_cluster_position_id" : [],
#    }
# -----------------------------------------------------------------------------
def facility_marker_fn(record):
    """
        Function to decide which Marker to use for the Facilities Map
    """
    db = current.db
    s3db = current.s3db
    table = db.org_facility_type
    ltable = db.org_site_facility_type
    query = (ltable.site_id == record.site_id) & \
            (ltable.facility_type_id == table.id)
    rows = db(query).select(table.name)
    types = [row.name for row in rows]

    # Use Marker in preferential order
    if "Hub" in types:
        marker = "warehouse"
    elif "Medical Clinic" in types:
        marker = "hospital"
    elif "Food" in types:
        marker = "food"
    elif "Relief Site" in types:
        marker = "asset"
    else:
        marker = "office"

    if settings.has_module("req"):
        # Colour code by open/priority requests
        reqs = record.reqs
        if reqs == 3:
            # High
            marker = "%s_red" % marker
        elif reqs == 2:
            # Medium
            marker = "%s_yellow" % marker
        elif reqs == 1:
            # Low
            marker = "%s_green" % marker

    mtable = db.gis_marker
    try:
        marker = db(mtable.name == marker).select(mtable.image,
                                                  mtable.height,
                                                  mtable.width,
                                                  cache=s3db.cache,
                                                  limitby=(0, 1)
                                                  ).first()
    except:
        # Fall back to the plain Facility marker
        marker = db(mtable.name == "office").select(mtable.image,
                                                    mtable.height,
                                                    mtable.width,
                                                    cache=s3db.cache,
                                                    limitby=(0, 1)
                                                    ).first()
    return marker
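# Illustrative sketch (an assumption, not part of the original template): the
# request colour-coding above just appends a suffix to the base marker name.
# The same rule as a stand-alone helper, assuming the 1/2/3 = low/medium/high
# priority convention used above:
def _colour_code_marker_sketch(marker, reqs):
    """ e.g. _colour_code_marker_sketch("hospital", 3) -> "hospital_red" """
    suffix = {3: "_red",    # High
              2: "_yellow", # Medium
              1: "_green",  # Low
              }.get(reqs, "")
    return "%s%s" % (marker, suffix)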
# -----------------------------------------------------------------------------
def org_facility_onvalidation(form):
    """
        Default the name to the Street Address
    """
    form_vars = form.vars
    name = form_vars.get("name", None)
    if name:
        return
    address = form_vars.get("address", None)
    if address:
        form_vars.name = address
    else:
        # We need a default
        form_vars.name = current.db.org_facility.location_id.represent(form_vars.location_id)

# -----------------------------------------------------------------------------
def customise_org_facility_controller(**attr):

    s3db = current.s3db
    s3 = current.response.s3

    # Custom prep
    standard_prep = s3.prep
    def custom_prep(r):
        # Call standard prep
        if callable(standard_prep):
            result = standard_prep(r)
        else:
            result = True

        if r.method not in ("read", "update"):
            types = r.get_vars.get("site_facility_type.facility_type_id__belongs", None)
            if not types:
                # Hide Private Residences
                from s3 import FS
                s3.filter = FS("site_facility_type.facility_type_id$name") != "Private Residence"

        if r.interactive:
            tablename = "org_facility"
            table = s3db.org_facility
            field = table.location_id
            if r.method in ("create", "update"):
                field.label = "" # Gets replaced by widget
            from s3 import IS_LOCATION_SELECTOR2, S3LocationSelectorWidget2, S3MultiSelectWidget
            levels = ("L2", "L3")
            field.requires = IS_LOCATION_SELECTOR2(levels=levels)
            field.widget = S3LocationSelectorWidget2(levels=levels,
                                                     hide_lx=False,
                                                     reverse_lx=True,
                                                     show_address=True,
                                                     show_postcode=True,
                                                     )
            table.organisation_id.widget = S3MultiSelectWidget(multiple=False)

            if r.get_vars.get("format", None) == "popup":
                # Coming from req/create form
                from s3 import S3SQLCustomForm, S3SQLInlineComponent
                # We default the name in org_facility_onvalidation
                table.name.notnull = False
                crud_form = S3SQLCustomForm("name",
                                            "location_id",
                                            )
                s3db.configure(tablename,
                               crud_form = crud_form,
                               onvalidation = org_facility_onvalidation,
                               )
        return True
    s3.prep = custom_prep

    return attr

settings.customise_org_facility_controller = customise_org_facility_controller
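# Illustrative sketch (an assumption, not part of the original template): every
# customise_*_controller in this file wraps the existing s3.prep callback
# rather than replacing it, so template hooks compose with the framework's own
# prep. The bare pattern, independent of any Eden tables:
def _wrap_prep_sketch(s3):
    standard_prep = s3.prep
    def custom_prep(r):
        # Call the original hook first (if any), then layer template logic
        result = standard_prep(r) if callable(standard_prep) else True
        # ... template-specific customisations go here ...
        return result
    s3.prep = custom_prep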
# -----------------------------------------------------------------------------
def customise_org_organisation_resource(r, tablename):

    from gluon.html import DIV, INPUT
    from s3 import S3MultiSelectWidget, S3SQLCustomForm, S3SQLInlineLink, \
                   S3SQLInlineComponent, S3SQLInlineComponentMultiSelectWidget

    s3db = current.s3db

    mtable = s3db.org_group_membership
    mtable.group_id.widget = S3MultiSelectWidget(multiple=False)
    mtable.status_id.widget = S3MultiSelectWidget(multiple=False,
                                                  create=dict(c="org",
                                                              f="group_membership_status",
                                                              label=str(T("Add New Status")),
                                                              parent="group_membership",
                                                              child="status_id"
                                                              ))

    crud_form = S3SQLCustomForm(
        "name",
        "acronym",
        S3SQLInlineLink(
            "organisation_type",
            label = T("Type"),
            multiple = False,
            #widget = "hierarchy",
        ),
        S3SQLInlineComponentMultiSelectWidget(
        # activate hierarchical org_service:
        #S3SQLInlineLink(
            "service",
            label = T("Services"),
            field = "service_id",
            # activate hierarchical org_service:
            #leafonly = False,
            #widget = "hierarchy",
            # @ToDo: GroupedCheckbox Widget or Hierarchical MultiSelectWidget
            #cols = 5,
        ),
        "phone",
        S3SQLInlineComponent(
            "contact",
            name = "phone2",
            label = T("Phone2"),
            multiple = False,
            fields = [("", "value")],
            filterby = dict(field = "contact_method",
                            options = "WORK_PHONE"
                            )
        ),
        S3SQLInlineComponent(
            "contact",
            name = "email",
            label = T("Email"),
            multiple = False,
            fields = [("", "value")],
            filterby = dict(field = "contact_method",
                            options = "EMAIL"
                            )
        ),
        "website",
        S3SQLInlineComponent(
            "contact",
            name = "rss",
            label = T("RSS"),
            multiple = False,
            fields = [("", "value"),
                      #(T("Don't Import Feed"), "poll"),
                      ],
            filterby = dict(field = "contact_method",
                            options = "RSS"
                            )
        ),
        S3SQLInlineComponent(
            "document",
            name = "iCal",
            label = "iCAL",
            multiple = False,
            fields = [("", "url")],
            filterby = dict(field = "name",
                            options="iCal"
                            )
        ),
        S3SQLInlineComponent(
            "document",
            name = "data",
            label = T("Data"),
            multiple = False,
            fields = [("", "url")],
            filterby = dict(field = "name",
                            options="Data"
                            )
        ),
        S3SQLInlineComponent(
            "contact",
            name = "twitter",
            label = T("Twitter"),
            multiple = False,
            fields = [("", "value")],
            filterby = dict(field = "contact_method",
                            options = "TWITTER"
                            )
        ),
        S3SQLInlineComponent(
            "contact",
            name = "facebook",
            label = T("Facebook"),
            multiple = False,
            fields = [("", "value")],
            filterby = dict(field = "contact_method",
                            options = "FACEBOOK"
                            )
        ),
        "comments",
        postprocess = pr_contact_postprocess,
    )

    from s3 import S3LocationFilter, S3OptionsFilter, S3TextFilter
    # activate hierarchical org_service:
    #from s3 import S3HierarchyFilter
    filter_widgets = [
        S3TextFilter(["name", "acronym"],
                     label = T("Name"),
                     _class = "filter-search",
                     ),
        S3OptionsFilter("group_membership.group_id",
                        label = T("Network"),
                        represent = "%(name)s",
                        #hidden = True,
                        ),
        S3LocationFilter("organisation_location.location_id",
                         label = T("Location"),
                         levels = ("L1", "L2", "L3", "L4"),
                         #hidden = True,
                         ),
        S3OptionsFilter("service_organisation.service_id",
                        #label = T("Service"),
                        #hidden = True,
                        ),
        # activate hierarchical org_service:
        #S3HierarchyFilter("service_organisation.service_id",
        #                  #label = T("Service"),
        #                  #hidden = True,
        #                  ),
        S3OptionsFilter("organisation_organisation_type.organisation_type_id",
                        label = T("Type"),
                        #hidden = True,
                        ),
        ]

    list_fields = ["name",
                   (T("Type"), "organisation_organisation_type.organisation_type_id"),
                   (T("Services"), "service.name"),
                   "phone",
                   (T("Email"), "email.value"),
                   "website"
                   #(T("Neighborhoods Served"), "location.name"),
                   ]

    s3db.configure("org_organisation",
                   crud_form = crud_form,
                   filter_widgets = filter_widgets,
                   list_fields = list_fields,
                   )

settings.customise_org_organisation_resource = customise_org_organisation_resource

# -----------------------------------------------------------------------------
def customise_org_organisation_controller(**attr):

    s3db = current.s3db
    s3 = current.response.s3

    # Custom prep
    standard_prep = s3.prep
    def custom_prep(r):
        # Call standard prep
        if callable(standard_prep):
            result = standard_prep(r)
        else:
            result = True

        if r.interactive:
            if r.component_name == "facility":
                if r.method in (None, "create", "update"):
                    from s3 import IS_LOCATION_SELECTOR2, S3LocationSelectorWidget2
                    table = s3db.org_facility
                    field = table.location_id
                    if r.method in ("create", "update"):
                        field.label = "" # Gets replaced by widget
                    levels = ("L2", "L3")
                    field.requires = IS_LOCATION_SELECTOR2(levels=levels)
                    field.widget = S3LocationSelectorWidget2(levels=levels,
                                                             hide_lx=False,
                                                             reverse_lx=True,
                                                             show_address=True,
                                                             show_postcode=True,
                                                             )
            elif r.component_name == "human_resource":
                # Don't assume that user is from same org/site as Contacts they create
                r.component.table.site_id.default = None

        return result
    s3.prep = custom_prep

    # Custom postp
    standard_postp = s3.postp
    def custom_postp(r, output):
        # Call standard postp
        if callable(standard_postp):
            output = standard_postp(r, output)

        if "rheader" in output:
            # Custom Tabs
            tabs = [(T("Basic Details"), None),
                    (T("Contacts"), "human_resource"),
                    (T("Facilities"), "facility"),
                    (T("Projects"), "project"),
                    (T("Assets"), "asset"),
                    ]
            output["rheader"] = s3db.org_rheader(r, tabs=tabs)
        return output
    s3.postp = custom_postp

    if current.auth.s3_logged_in():
        # Allow components with components (such as org/group) to breakout from tabs
        attr["native"] = True

    return attr

settings.customise_org_organisation_controller = customise_org_organisation_controller
# -----------------------------------------------------------------------------
def customise_org_group_controller(**attr):

    s3db = current.s3db
    s3 = current.response.s3

    # Custom prep
    standard_prep = s3.prep
    def custom_prep(r):
        # Call standard prep
        if callable(standard_prep):
            result = standard_prep(r)
        else:
            result = True

        if not r.component:
            table = s3db.org_group
            list_fields = ["name",
                           "mission",
                           "website",
                           "meetings",
                           ]
            s3db.configure("org_group",
                           list_fields = list_fields,
                           )

            if r.interactive:
                from gluon.html import DIV, INPUT
                from s3 import S3SQLCustomForm, S3SQLInlineComponent
                if r.method != "read":
                    from gluon.validators import IS_EMPTY_OR
                    from s3 import IS_LOCATION_SELECTOR2, S3LocationSelectorWidget2
                    field = table.location_id
                    field.label = "" # Gets replaced by widget
                    #field.requires = IS_LOCATION_SELECTOR2(levels = ("L2",))
                    field.requires = IS_EMPTY_OR(
                                        IS_LOCATION_SELECTOR2(levels = ("L2",))
                                        )
                    field.widget = S3LocationSelectorWidget2(levels = ("L2",),
                                                             points = True,
                                                             polygons = True,
                                                             )
                    # Default location to Manhattan
                    db = current.db
                    gtable = db.gis_location
                    query = (gtable.name == "New York") & \
                            (gtable.level == "L2")
                    manhattan = db(query).select(gtable.id,
                                                 limitby=(0, 1)
                                                 ).first()
                    if manhattan:
                        field.default = manhattan.id

                table.mission.readable = table.mission.writable = True

                if r.id:
                    # Update form
                    ctable = s3db.pr_contact
                    query = (ctable.pe_id == r.record.pe_id) & \
                            (ctable.contact_method == "RSS") & \
                            (ctable.deleted == False)
                    rss = current.db(query).select(ctable.poll,
                                                   limitby=(0, 1)
                                                   ).first()
                    if rss and not rss.poll:
                        # Remember that we don't wish to import
                        rss_import = "on"
                    else:
                        # Default
                        rss_import = None
                else:
                    # Create form: Default
                    rss_import = None

                crud_form = S3SQLCustomForm(
                    "name",
                    "location_id",
                    "mission",
                    S3SQLInlineComponent(
                        "contact",
                        name = "phone",
                        label = T("Phone"),
                        multiple = False,
                        fields = [("", "value")],
                        filterby = dict(field = "contact_method",
                                        options = "WORK_PHONE"
                                        )
                    ),
                    "website",
                    S3SQLInlineComponent(
                        "contact",
                        name = "rss",
                        label = T("RSS"),
                        multiple = False,
                        fields = [("", "value")],
                        filterby = dict(field = "contact_method",
                                        options = "RSS"
                                        ),
                        comment = DIV(INPUT(_type="checkbox",
                                            _name="rss_no_import",
                                            value=rss_import,
                                            ),
                                      T("Don't Import Feed"),
                                      ),
                    ),
                    S3SQLInlineComponent(
                        "contact",
                        name = "media",
                        label = T("URLs (media, fundraising, website, social media, etc.)"),
                        fields = [("", "value")],
                        filterby = dict(field = "contact_method",
                                        options = "URL"
                                        )
                    ),
                    "comments",
                    postprocess = pr_contact_postprocess,
                )
                s3db.configure("org_group",
                               crud_form = crud_form,
                               )
        else:
            # Component
            if r.component_name == "organisation":
                list_fields = s3db.get_config("org_organisation", "list_fields")
                list_fields.insert(1, "group_membership.status_id")
            if r.component_id:
                # Update form
                db = current.db
                otable = s3db.org_organisation
                org = db(otable.id == r.component_id).select(otable.pe_id,
                                                             limitby=(0, 1)
                                                             ).first()
                try:
                    pe_id = org.pe_id
                except:
                    current.log.error("Unable to find Organisation %s, so cannot set rss_import correctly" % r.component_id)
                    # Default
                    rss_import = None
                else:
                    ctable = s3db.pr_contact
                    query = (ctable.pe_id == pe_id) & \
                            (ctable.contact_method == "RSS") & \
                            (ctable.deleted == False)
                    rss = db(query).select(ctable.poll,
                                           limitby=(0, 1)
                                           ).first()
                    if rss and not rss.poll:
                        # Remember that we don't wish to import
                        # (feeds the "Don't Import Feed" checkbox on the tab)
                        rss_import = "on"
                    else:
                        # Default
                        rss_import = None

        return result
    s3.prep = custom_prep

    return attr

settings.customise_org_group_controller = customise_org_group_controller
# -----------------------------------------------------------------------------
# Persons
# Uncomment to hide fields in S3AddPersonWidget
settings.pr.request_dob = False
settings.pr.request_gender = False
# Doesn't yet work (form fails to submit)
#settings.pr.select_existing = False

# -----------------------------------------------------------------------------
def customise_pr_person_controller(**attr):

    s3db = current.s3db
    s3 = current.response.s3

    # Custom prep
    standard_prep = s3.prep
    def custom_prep(r):
        # Call standard prep
        if callable(standard_prep):
            result = standard_prep(r)
        else:
            result = True

        #if r.method == "validate":
        #    # Can't validate image without the file
        #    image_field = s3db.pr_image.image
        #    image_field.requires = None

        if r.interactive or r.representation == "aadata":
            if not r.component:
                hr_fields = ["organisation_id",
                             "job_title_id",
                             "site_id",
                             ]
                if r.method in ("create", "update"):
                    get_vars = r.get_vars
                    # Context from a Profile page?
                    organisation_id = get_vars.get("(organisation)", None)
                    if organisation_id:
                        field = s3db.hrm_human_resource.organisation_id
                        field.default = organisation_id
                        field.readable = field.writable = False
                        hr_fields.remove("organisation_id")
                    site_id = get_vars.get("(site)", None)
                    if site_id:
                        field = s3db.hrm_human_resource.site_id
                        field.default = site_id
                        field.readable = field.writable = False
                        hr_fields.remove("site_id")
                    else:
                        s3db.hrm_human_resource.site_id.default = None

                # ImageCrop widget doesn't currently work within an Inline Form
                #image_field = s3db.pr_image.image
                #from gluon.validators import IS_IMAGE
                #image_field.requires = IS_IMAGE()
                #image_field.widget = None

                from s3 import S3SQLCustomForm, S3SQLInlineComponent
                s3_sql_custom_fields = [
                    "first_name",
                    #"middle_name",
                    "last_name",
                    S3SQLInlineComponent(
                        "human_resource",
                        name = "human_resource",
                        label = "",
                        multiple = False,
                        fields = hr_fields,
                    ),
                    #S3SQLInlineComponent(
                    #    "image",
                    #    name = "image",
                    #    label = T("Photo"),
                    #    multiple = False,
                    #    fields = [("", "image")],
                    #    filterby = dict(field = "profile",
                    #                    options=[True]
                    #                    )
                    #),
                    ]

                list_fields = [(current.messages.ORGANISATION, "human_resource.organisation_id"),
                               "first_name",
                               #"middle_name",
                               "last_name",
                               (T("Job Title"), "human_resource.job_title_id"),
                               (T("Office"), "human_resource.site_id"),
                               ]

                # Don't include Email/Phone for unauthenticated users
                if current.auth.is_logged_in():
                    MOBILE = settings.get_ui_label_mobile_phone()
                    EMAIL = T("Email")
                    s3_sql_custom_fields.insert(3,
                                                S3SQLInlineComponent(
                                                    "contact",
                                                    name = "phone",
                                                    label = MOBILE,
                                                    multiple = False,
                                                    fields = [("", "value")],
                                                    filterby = dict(field = "contact_method",
                                                                    options = "SMS")),
                                                )
                    s3_sql_custom_fields.insert(3,
                                                S3SQLInlineComponent(
                                                    "contact",
                                                    name = "email",
                                                    label = EMAIL,
                                                    multiple = False,
                                                    fields = [("", "value")],
                                                    filterby = dict(field = "contact_method",
                                                                    options = "EMAIL")),
                                                )

                crud_form = S3SQLCustomForm(*s3_sql_custom_fields)
                s3db.configure(r.tablename,
                               crud_form = crud_form,
                               list_fields = list_fields,
                               )

            elif r.component_name == "group_membership":
                s3db.pr_group_membership.group_head.label = T("Group Chairperson")

        return result
    s3.prep = custom_prep

    # Custom postp
    standard_postp = s3.postp
    def custom_postp(r, output):
        # Call standard postp
        if callable(standard_postp):
            output = standard_postp(r, output)

        if r.interactive and isinstance(output, dict):
            if "form" in output:
                output["form"].add_class("pr_person")
            elif "item" in output and hasattr(output["item"], "add_class"):
                output["item"].add_class("pr_person")
        return output
    s3.postp = custom_postp

    return attr

settings.customise_pr_person_controller = customise_pr_person_controller
# -----------------------------------------------------------------------------
# Groups
def chairperson(row):
    """
        Virtual Field to show the chairperson of a group
    """

    if hasattr(row, "pr_group"):
        row = row.pr_group
    try:
        group_id = row.id
    except:
        # not available
        return current.messages["NONE"]

    db = current.db
    mtable = current.s3db.pr_group_membership
    ptable = db.pr_person
    query = (mtable.group_id == group_id) & \
            (mtable.group_head == True) & \
            (mtable.person_id == ptable.id)
    chair = db(query).select(ptable.id,
                             ptable.first_name,
                             ptable.middle_name,
                             ptable.last_name,
                             limitby=(0, 1)).first()
    if chair:
        # Only used in list view so HTML is OK
        return A(s3_fullname(chair),
                 _href=URL(c="pr", f="person", args=chair.id))
    else:
        return current.messages["NONE"]

# -----------------------------------------------------------------------------
def customise_pr_group_controller(**attr):

    s3 = current.response.s3

    # Custom prep
    standard_prep = s3.prep
    def custom_prep(r):
        # Call standard prep
        if callable(standard_prep):
            result = standard_prep(r)
        else:
            result = True

        s3db = current.s3db
        s3db.pr_group_membership.group_head.label = T("Group Chairperson")
        if r.component_name == "group_membership":
            from s3layouts import S3AddResourceLink
            s3db.pr_group_membership.person_id.comment = \
                S3AddResourceLink(c="pr", f="person",
                                  title=T("Create Person"))
        #else:
        #    # RHeader wants a simplified version, but don't want inconsistent across tabs
        #    s3db.pr_group_membership.group_head.label = T("Chairperson")

        return result
    s3.prep = custom_prep

    return attr

settings.customise_pr_group_controller = customise_pr_group_controller

# -----------------------------------------------------------------------------
def customise_pr_group_resource(r, tablename):
    """
        Customise pr_group resource (in group & org_group controllers)
        - runs after controller customisation
        - but runs before prep
    """

    s3db = current.s3db
    table = s3db.pr_group

    field = table.group_type
    field.default = 3 # Relief Team, to show up in hrm/group
    field.readable = field.writable = False

    table.name.label = T("Name")
    table.description.label = T("Description")
    table.meetings.readable = table.meetings.writable = True

    # Increase size of widget
    from s3 import s3_comments_widget
    table.description.widget = s3_comments_widget

    from gluon import Field
    table.chairperson = Field.Method("chairperson", chairperson)

    from s3 import S3SQLCustomForm, S3SQLInlineComponent, S3TextFilter, S3OptionsFilter
    crud_form = S3SQLCustomForm("name",
                                "description",
                                S3SQLInlineComponent("group_team",
                                                     label = T("Network"),
                                                     fields = [("", "org_group_id")],
                                                     # @ToDo: Make this optional?
                                                     multiple = False,
                                                     ),
                                "meetings",
                                "comments",
                                )

    filter_widgets = [
        S3TextFilter(["name",
                      "description",
                      "comments",
                      "group_team.org_group_id$name",
                      ],
                     label = T("Search"),
                     comment = T("You can search by group name, description or comments and by network name. You may use % as wildcard. Press 'Search' without input to list all."),
                     #_class = "filter-search",
                     ),
        S3OptionsFilter("group_team.org_group_id",
                        label = T("Network"),
                        #hidden = True,
                        ),
        ]

    # Do not redefine in controllers as these get over_written by hrm_group_controller()
    list_fields = [(T("Network"), "group_team.org_group_id"),
                   "name",
                   "description",
                   "meetings",
                   (T("Chairperson"), "chairperson"),
                   "comments",
                   ]

    s3db.configure("pr_group",
                   crud_form = crud_form,
                   filter_widgets = filter_widgets,
                   list_fields = list_fields,
                   # Redirect to member list when a new group has been created
                   create_next = URL(c="hrm", f="group",
                                     args=["[id]", "group_membership"]),
                   )

settings.customise_pr_group_resource = customise_pr_group_resource
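# Illustrative sketch (an assumption, not part of the original template):
# web2py's Field.Method, as used for "chairperson" above, attaches a lazy
# per-row callable rather than a stored column, so it costs nothing until a
# list view actually renders it. Minimal stand-alone DAL example:
#
#   from pydal import DAL, Field
#   db = DAL("sqlite:memory")
#   db.define_table("thing", Field("name"))
#   db.thing.shout = Field.Method(lambda row: row.thing.name.upper())
#   db.thing.insert(name="hub")
#   print(db(db.thing).select().first().shout())   # -> "HUB"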
# -----------------------------------------------------------------------------
def pr_contact_postprocess(form):
    """
        Import Organisation/Network RSS Feeds
    """

    s3db = current.s3db

    form_vars = form.vars
    rss_url = form_vars.rsscontact_i_value_edit_0 or \
              form_vars.rsscontact_i_value_edit_none
    if not rss_url:
        if form.record:
            # Update form
            old_rss = form.record.sub_rsscontact
            import json
            data = json.loads(old_rss)["data"]
            if data:
                # RSS feed has been deleted, so disable any channel
                old_rss = data[0]["value"]["value"]
                table = s3db.msg_rss_channel
                old = current.db(table.url == old_rss).select(table.channel_id,
                                                              table.enabled,
                                                              limitby=(0, 1)
                                                              ).first()
                if old and old.enabled:
                    s3db.msg_channel_disable("msg_rss_channel", old.channel_id)
                return
        else:
            # Nothing to do :)
            return

    # Check if we already have a channel for this Contact
    db = current.db
    name = form_vars.name
    table = s3db.msg_rss_channel
    name_exists = db(table.name == name).select(table.id,
                                                table.channel_id,
                                                table.enabled,
                                                table.url,
                                                limitby = (0, 1)
                                                ).first()
    no_import = current.request.post_vars.get("rss_no_import", None)
    if name_exists:
        if name_exists.url == rss_url:
            # No change to either Contact Name or URL
            if no_import:
                if name_exists.enabled:
                    # Disable channel (& associated parsers)
                    s3db.msg_channel_disable("msg_rss_channel", name_exists.channel_id)
                return
            elif name_exists.enabled:
                # Nothing to do :)
                return
            else:
                # Enable channel (& associated parsers)
                s3db.msg_channel_enable("msg_rss_channel", name_exists.channel_id)
                return

        # Check if we already have a channel for this URL
        url_exists = db(table.url == rss_url).select(table.id,
                                                     table.channel_id,
                                                     table.enabled,
                                                     limitby = (0, 1)
                                                     ).first()
        if url_exists:
            # Either Contact has changed Name or this feed is associated with
            # another Contact
            # We have 2 feeds: 1 for the Contact & 1 for the URL
            # Disable the old Contact one and link the URL one to this Contact
            # Name field is unique so rename old one
            name_exists.update_record(name="%s (Old)" % name)
            if name_exists.enabled:
                # Disable channel (& associated parsers)
                s3db.msg_channel_disable("msg_rss_channel", name_exists.channel_id)
            url_exists.update_record(name=name)
            if no_import:
                if url_exists.enabled:
                    # Disable channel (& associated parsers)
                    s3db.msg_channel_disable("msg_rss_channel", url_exists.channel_id)
                return
            elif url_exists.enabled:
                # Nothing to do :)
                return
            else:
                # Enable channel (& associated parsers)
                s3db.msg_channel_enable("msg_rss_channel", url_exists.channel_id)
                return
        else:
            # Update the URL
            name_exists.update_record(url=rss_url)
            if no_import:
                if name_exists.enabled:
                    # Disable channel (& associated parsers)
                    s3db.msg_channel_disable("msg_rss_channel", name_exists.channel_id)
                return
            elif name_exists.enabled:
                # Nothing to do :)
                return
            else:
                # Enable channel (& associated parsers)
                s3db.msg_channel_enable("msg_rss_channel", name_exists.channel_id)
                return
    #else:
    #    # Create a new Feed
    #    pass

    # Add RSS Channel
    _id = table.insert(name=name, enabled=True, url=rss_url)
    record = dict(id=_id)
    s3db.update_super(table, record)
    # Enable
    channel_id = record["channel_id"]
    s3db.msg_channel_enable("msg_rss_channel", channel_id)
    # Setup Parser
    table = s3db.msg_parser
    _id = table.insert(channel_id=channel_id, function_name="parse_rss", enabled=True)
    s3db.msg_parser_enable(_id)
    # Check Now
    async = current.s3task.async
    async("msg_poll", args=["msg_rss_channel", channel_id])
    async("msg_parse", args=[channel_id, "parse_rss"])
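# Illustrative sketch (an assumption, not part of the original template): the
# branching above reduces to a small decision table keyed on whether the saved
# Contact name and/or the feed URL already have a channel. A toy, DB-free
# restatement of the same policy:
def _rss_channel_policy_sketch(name_match, url_match, no_import):
    """ Returns the action the postprocess above would take. """
    if name_match and url_match and name_match is url_match:
        # Same channel: just toggle polling on/off
        return "disable" if no_import else "enable"
    if name_match and url_match:
        # Two channels: retire the stale name, keep the URL channel
        return "rename old, relink URL channel"
    if name_match:
        # Same Contact, new feed: point the channel at the new URL
        return "update URL"
    return "create channel + parser, then poll"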
# -----------------------------------------------------------------------------
# Human Resource Management
# Uncomment to change the label for 'Staff'
settings.hrm.staff_label = "Contacts"
# Uncomment to allow Staff & Volunteers to be registered without an email address
settings.hrm.email_required = False
# Uncomment to show the Organisation name in HR represents
settings.hrm.show_organisation = True
# Change the label of "Teams" to "Groups"
settings.hrm.teams = "Groups"
# Uncomment to disable Staff experience
settings.hrm.staff_experience = False
# Uncomment to disable the use of HR Certificates
settings.hrm.use_certificates = False
# Uncomment to disable the use of HR Credentials
settings.hrm.use_credentials = False
# Uncomment to disable the use of HR Description
settings.hrm.use_description = False
# Uncomment to enable the use of HR Education
settings.hrm.use_education = False
# Uncomment to disable the use of HR Skills
#settings.hrm.use_skills = False
# Uncomment to disable the use of HR Trainings
settings.hrm.use_trainings = False

# -----------------------------------------------------------------------------
def customise_hrm_human_resource_resource(r, tablename):
    """
        Customise hrm_human_resource resource (in facility, human_resource,
        organisation & person controllers)
        - runs after controller customisation
        - but runs before prep
    """

    s3db = current.s3db

    from s3 import S3SQLCustomForm, S3SQLInlineComponent
    crud_form = S3SQLCustomForm("person_id",
                                "organisation_id",
                                "site_id",
                                S3SQLInlineComponent(
                                    "group_person",
                                    label = T("Network"),
                                    link = False,
                                    fields = [("", "group_id")],
                                    multiple = False,
                                ),
                                "job_title_id",
                                "start_date",
                                )

    from s3 import S3TextFilter, S3OptionsFilter, S3LocationFilter
    filter_widgets = [
        S3TextFilter(["person_id$first_name",
                      "person_id$middle_name",
                      "person_id$last_name",
                      ],
                     label = T("Name"),
                     ),
        S3OptionsFilter("organisation_id",
                        filter = True,
                        header = "",
                        hidden = True,
                        ),
        S3OptionsFilter("group_person.group_id",
                        label = T("Network"),
                        #hidden = True,
                        ),
        S3LocationFilter("location_id",
                         label = T("Location"),
                         levels = ("L1", "L2", "L3", "L4"),
                         hidden = True,
                         ),
        S3OptionsFilter("site_id",
                        hidden = True,
                        ),
        S3OptionsFilter("training.course_id",
                        label = T("Training"),
                        hidden = True,
                        ),
        S3OptionsFilter("group_membership.group_id",
                        label = T("Team"),
                        filter = True,
                        header = "",
                        hidden = True,
                        ),
        ]

    list_fields = ["id",
                   "person_id",
                   "job_title_id",
                   "organisation_id",
                   (T("Network"), "group_person.group_id"),
                   (T("Groups"), "person_id$group_membership.group_id"),
                   "site_id",
                   #"site_contact",
                   (T("Email"), "email.value"),
                   (settings.get_ui_label_mobile_phone(), "phone.value"),
                   ]

    s3db.configure("hrm_human_resource",
                   crud_form = crud_form,
                   filter_widgets = filter_widgets,
                   list_fields = list_fields,
                   )

settings.customise_hrm_human_resource_resource = customise_hrm_human_resource_resource

# -----------------------------------------------------------------------------
def customise_hrm_job_title_controller(**attr):

    s3 = current.response.s3

    # Custom prep
    standard_prep = s3.prep
    def custom_prep(r):
        # Call standard prep
        if callable(standard_prep):
            result = standard_prep(r)
        else:
            result = True

        if r.interactive or r.representation == "aadata":
            table = current.s3db.hrm_job_title
            table.organisation_id.readable = table.organisation_id.writable = False
            table.type.readable = table.type.writable = False

        return result
    s3.prep = custom_prep

    return attr

settings.customise_hrm_job_title_controller = customise_hrm_job_title_controller
# -----------------------------------------------------------------------------
# Content Management
# Uncomment to use Bookmarks in Newsfeed
settings.cms.bookmarks = True
# Uncomment to have the Filter form in Newsfeed open by default
settings.cms.filter_open = True
# Uncomment to adjust filters in Newsfeed when clicking on locations instead of opening the profile page
settings.cms.location_click_filters = True
# Uncomment to use organisation_id in Newsfeed
settings.cms.organisation = "post_organisation.organisation_id"
# Uncomment to use org_group_id in Newsfeed
settings.cms.organisation_group = "post_organisation_group.group_id"
# Uncomment to use person_id instead of created_by in Newsfeed
settings.cms.person = "person_id"
# Uncomment to use Rich Text editor in Newsfeed
settings.cms.richtext = True
# Uncomment to show Links in Newsfeed
settings.cms.show_links = True
# Uncomment to show Tags in Newsfeed
settings.cms.show_tags = True
# Uncomment to show post Titles in Newsfeed
settings.cms.show_titles = True

# -----------------------------------------------------------------------------
# Inventory Management
settings.inv.send_type_default = 1
settings.inv.item_status = {
    #0: current.messages["NONE"],
    #1: T("Dump"),
    #2: T("Sale"),
    #3: T("Reject"),
    #4: T("Surplus")
}
# Uncomment to not track pack values
settings.inv.track_pack_values = False
settings.inv.send_show_org = False
# Types common to both Send and Receive
settings.inv.shipment_types = {
    1: T("Other Warehouse")
}
settings.inv.send_types = {
    #21: T("Distribution"),
}
# Uncomment for a simplified process for managing stock levels
#settings.inv.direct_stock_edits = True
# Uncomment to call Stock Adjustments, 'Stock Counts'
settings.inv.stock_count = True

# -----------------------------------------------------------------------------
# Requests Management
settings.req.req_type = ["People", "Stock"]#, "Summary"]
settings.req.prompt_match = False
#settings.req.use_commit = False
settings.req.requester_optional = True
settings.req.date_writable = False
settings.req.requester_label = "Site Contact"
# Filter Requester as being from the Site
settings.req.requester_from_site = True
# Uncomment to customise the label for Inventory Requests
settings.req.type_inv_label = "Supplies"
# Uncomment to enable Summary 'Site Needs' tab for Offices/Facilities
settings.req.summary = True

# -----------------------------------------------------------------------------
def req_req_postprocess(form):
    """
        Runs after crud_form completes
        - creates a cms_post in the newswire
        - @ToDo: Send out Tweets
    """

    req_id = form.vars.id

    db = current.db
    s3db = current.s3db
    rtable = s3db.req_req

    # Read the full record
    row = db(rtable.id == req_id).select(rtable.type,
                                         rtable.site_id,
                                         rtable.requester_id,
                                         rtable.priority,
                                         rtable.date_required,
                                         rtable.purpose,
                                         rtable.comments,
                                         limitby=(0, 1)
                                         ).first()

    # Build Title & Body from the Request details
    priority = rtable.priority.represent(row.priority)
    date_required = row.date_required
    if date_required:
        date = rtable.date_required.represent(date_required)
        title = "%(priority)s by %(date)s" % dict(priority=priority,
                                                  date=date)
    else:
        title = priority
    body = row.comments
    if row.type == 1:
        # Items
        ritable = s3db.req_req_item
        items = db(ritable.req_id == req_id).select(ritable.item_id,
                                                    ritable.item_pack_id,
                                                    ritable.quantity)
        item_represent = s3db.supply_item_represent
        pack_represent = s3db.supply_item_pack_represent
        for item in items:
            item = "%s %s %s" % (item.quantity,
                                 pack_represent(item.item_pack_id),
                                 item_represent(item.item_id))
            body = "%s\n%s" % (item, body)
    else:
        # Skills
        body = "%s\n%s" % (row.purpose, body)
        rstable = s3db.req_req_skill
        skills = db(rstable.req_id == req_id).select(rstable.skill_id,
                                                     rstable.quantity)
        skill_represent = s3db.hrm_multi_skill_represent
        for skill in skills:
            skill = "%s %s" % (skill.quantity,
                               skill_represent(skill.skill_id))
            body = "%s\n%s" % (skill, body)

    # Lookup series_id
    stable = s3db.cms_series
    try:
        series_id = db(stable.name == "Request").select(stable.id,
                                                        cache=s3db.cache,
                                                        limitby=(0, 1)
                                                        ).first().id
    except:
        # Prepop hasn't been run
        series_id = None

    # Location is that of the site
    otable = s3db.org_site
    location_id = db(otable.site_id == row.site_id).select(otable.location_id,
                                                           limitby=(0, 1)
                                                           ).first().location_id
    # Create the Post
    ptable = s3db.cms_post
    _id = ptable.insert(series_id=series_id,
                        title=title,
                        body=body,
                        location_id=location_id,
                        person_id=row.requester_id,
                        )
    record = dict(id=_id)
    s3db.update_super(ptable, record)

    # Add source link
    url = "%s%s" % (settings.get_base_public_url(),
                    URL(c="req", f="req", args=req_id))
    s3db.doc_document.insert(doc_id=record["doc_id"],
                             url=url,
                             )
# -----------------------------------------------------------------------------
# UI settings
# Enable this to change the label for 'Mobile Phone'
settings.ui.label_mobile_phone = "Cell Phone"
# Enable this to change the label for 'Postcode'
settings.ui.label_postcode = "ZIP Code"
# Uncomment to have Open links in IFrames open a full page in a new tab
settings.ui.iframe_opens_full = True
settings.ui.label_attachments = "Media"
settings.ui.update_label = "Edit"
# Uncomment to show user menu as Names not Emails
settings.ui.auth_user_represent = "name"

# -----------------------------------------------------------------------------
# Audit
def audit_write(method, tablename, form, record, representation):
    if not current.auth.user:
        # Don't include prepop
        return False
    # Audit writes to selected tables only
    # NB the original tuple of tablenames is truncated in this copy of the
    #    source, so a hypothetical placeholder is used here
    audited_tables = ()
    if tablename in audited_tables:
        return True
    else:
        return False

settings.security.audit_write = audit_write

# -----------------------------------------------------------------------------
# CMS
# Uncomment to use Bookmarks in Newsfeed
settings.cms.bookmarks = True
# Uncomment to use have Filter form in Newsfeed be open by default
settings.cms.filter_open = True
# Uncomment to adjust filters in Newsfeed when clicking on locations instead of opening the profile page
settings.cms.location_click_filters = True
# Uncomment to use Rich Text editor in Newsfeed
settings.cms.richtext = True
# Uncomment to show Links in Newsfeed
settings.cms.show_links = True
# Uncomment to show Tags in Newsfeed
settings.cms.show_tags = True
# Uncomment to show post Titles in Newsfeed
settings.cms.show_titles = True
# Uncomment to use organisation_id in Newsfeed
settings.cms.organisation = "post_organisation.organisation_id"
# Uncomment to use org_group_id in Newsfeed
settings.cms.organisation_group = "post_organisation_group.group_id"
# -----------------------------------------------------------------------------
# Organisations
#
# Enable the use of Organisation Groups
settings.org.groups = "Network"
# Set the label for Sites
settings.org.site_label = "Facility"
#settings.org.site_label = "Location"
# Uncomment to show the date a Site (Facilities-only for now) was last contacted
settings.org.site_last_contacted = True
# Enable certain fields just for specific Organisations
# empty list => disabled for all (including Admin)
#settings.org.dependent_fields = \
#    {"pr_person_details.mother_name"                 : [],
#     "pr_person_details.father_name"                 : [],
#     "pr_person_details.company"                     : [],
#     "vol_volunteer.active"                          : [],
#     "vol_volunteer_cluster.vol_cluster_type_id"     : [],
#     "vol_volunteer_cluster.vol_cluster_id"          : [],
#     "vol_volunteer_cluster.vol_cluster_position_id" : [],
#     }
# Uncomment to use an Autocomplete for Site lookup fields
settings.org.site_autocomplete = True
# Extra fields to search in Autocompletes & display in Representations
settings.org.site_autocomplete_fields = ("organisation_id$name",
                                         "location_id$addr_street",
                                         )
# Uncomment to hide inv & req tabs from Sites
#settings.org.site_inv_req_tabs = False
# -----------------------------------------------------------------------------
# Human Resource Management
# Uncomment to change the label for 'Staff'
settings.hrm.staff_label = "Contacts"
# Uncomment to allow Staff & Volunteers to be registered without an email address
settings.hrm.email_required = False
# Uncomment to allow Staff & Volunteers to be registered without an Organisation
settings.hrm.org_required = False
# Uncomment to show the Organisation name in HR represents
settings.hrm.show_organisation = True
# Uncomment to disable Staff experience
settings.hrm.staff_experience = False
# Uncomment to disable the use of HR Certificates
settings.hrm.use_certificates = False
# Uncomment to disable the use of HR Description
settings.hrm.use_description = False
# Uncomment to disable the use of HR Education
settings.hrm.use_education = False
# Uncomment to disable the use of HR Skills
#settings.hrm.use_skills = False
# Uncomment to disable the use of HR Trainings
settings.hrm.use_trainings = False
# Uncomment to change the label of "Teams" to "Groups"
settings.hrm.teams = "Groups"
# Custom label for Organisations in HR module
#settings.hrm.organisation_label = "National Society / Branch"
settings.hrm.organisation_label = "Organization"
# -----------------------------------------------------------------------------
def customise_hrm_human_resource_controller(**attr):

    s3 = current.response.s3

    # Custom prep
    standard_prep = s3.prep
    def custom_prep(r):
        # Call standard prep
        if callable(standard_prep):
            result = standard_prep(r)
            if not result:
                return False
        else:
            result = True

        if not r.component and (r.interactive or r.representation == "aadata"):
            s3db = current.s3db
            from s3 import S3TextFilter, S3OptionsFilter, S3LocationFilter
            filter_widgets = [
                S3TextFilter(["person_id$first_name",
                              "person_id$middle_name",
                              "person_id$last_name",
                              ],
                             label = T("Name"),
                             ),
                S3OptionsFilter("organisation_id",
                                filter = True,
                                header = "",
                                hidden = True,
                                ),
                S3LocationFilter("location_id",
                                 label = T("Location"),
                                 levels = ("L1", "L2", "L3", "L4"),
                                 hidden = True,
                                 ),
                S3OptionsFilter("training.course_id",
                                label = T("Training"),
                                hidden = True,
                                ),
                S3OptionsFilter("group_membership.group_id",
                                label = T("Group"),
                                hidden = True,
                                ),
                ]
            s3db.configure("hrm_human_resource",
                           filter_widgets = filter_widgets,
                           )

            # Don't assume that all Staff are from same org/site as Contacts they create
            field = s3db.hrm_human_resource.site_id
            field.default = None
            # Use a hierarchical dropdown instead of AC
            field.widget = None
            script = \
'''$.filterOptionsS3({
 'trigger':'organisation_id',
 'target':'site_id',
 'lookupResource':'site',
 'lookupURL':'/%s/org/sites_for_org/',
 'optional':true
})''' % r.application
            s3.jquery_ready.append(script)
        return result
    s3.prep = custom_prep

    return attr

settings.customise_hrm_human_resource_controller = customise_hrm_human_resource_controller

# -----------------------------------------------------------------------------
def customise_hrm_human_resource_resource(r, tablename):
    """
        Customise hrm_human_resource resource (in human_resource & person controllers)
        - runs after controller customisation
        - but runs before prep
    """

    s3db = current.s3db
    from s3 import S3SQLCustomForm, S3SQLInlineComponent
    crud_form = S3SQLCustomForm("person_id",
                                "organisation_id",
                                "site_id",
                                S3SQLInlineComponent("group_person",
                                                     label = T("Network"),
                                                     link = False,
                                                     fields = [("", "group_id")],
                                                     multiple = False,
                                                     ),
                                "job_title_id",
                                "start_date",
                                )
    list_fields = ["id",
                   "person_id",
                   "job_title_id",
                   "organisation_id",
                   (T("Network"), "group_person.group_id"),
                   (T("Groups"), "person_id$group_membership.group_id"),
                   "site_id",
                   #"site_contact",
                   ]
    # Don't include Email/Phone for unauthenticated users
    if current.auth.is_logged_in():
        MOBILE = settings.get_ui_label_mobile_phone()
        EMAIL = T("Email")
        list_fields += [(MOBILE, "phone.value"),
                        (EMAIL, "email.value"),
                        ]
    s3db.configure(tablename,
                   crud_form = crud_form,
                   list_fields = list_fields,
                   )

settings.customise_hrm_human_resource_resource = customise_hrm_human_resource_resource
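# NOTE (illustrative sketch, not part of the original template): every
# customise_*_controller in this file uses the same wrapping pattern - save
# the standard prep callback, run it first, then layer template-specific
# behaviour on top. Reduced to plain Python with a hypothetical stand-in
# object, the pattern looks like this:
def _demo_prep_chaining():
    class _FakeS3(object):
        prep = None
    s3 = _FakeS3()
    s3.prep = lambda r: True            # stands in for the standard prep

    standard_prep = s3.prep
    def custom_prep(r):
        # Call standard prep first and keep its veto power
        if callable(standard_prep):
            result = standard_prep(r)
            if not result:
                return False
        # ... template-specific customisation would go here ...
        return True
    s3.prep = custom_prep
    return s3.prep("dummy request")     # -> True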
# -----------------------------------------------------------------------------
def customise_hrm_job_title_controller(**attr):

    s3 = current.response.s3

    # Custom prep
    standard_prep = s3.prep
    def custom_prep(r):
        # Call standard prep
        if callable(standard_prep):
            result = standard_prep(r)
        else:
            result = True

        if r.interactive or r.representation == "aadata":
            table = current.s3db.hrm_job_title
            table.organisation_id.readable = table.organisation_id.writable = False
            table.type.readable = table.type.writable = False
        return result
    s3.prep = custom_prep

    return attr

settings.customise_hrm_job_title_controller = customise_hrm_job_title_controller
# -----------------------------------------------------------------------------
# Projects
# Use codes for projects (called 'blurb' in NYC)
settings.project.codes = True
# Uncomment this to use Activities for projects
settings.project.activities = True
# Uncomment this to use Milestones in project/task.
settings.project.milestones = False
# Uncomment this to disable Sectors in projects
settings.project.sectors = False
# Multiple partner organizations
settings.project.multiple_organisations = True

def customise_project_project_controller(**attr):

    s3 = current.response.s3

    # Custom prep
    standard_prep = s3.prep
    def custom_prep(r):
        # Call standard prep
        if callable(standard_prep):
            result = standard_prep(r)
            if not result:
                return False
        else:
            result = True

        if not r.component and (r.interactive or r.representation == "aadata"):
            from s3 import S3SQLCustomForm, S3SQLInlineComponent, S3SQLInlineComponentCheckbox
            s3db = current.s3db
            table = r.table
            tablename = "project_project"
            table.code.label = T("Project blurb (max. 100 characters)")
            table.code.max_length = 100
            table.comments.label = T("How people can help")
            script = '''$('#project_project_code').attr('maxlength','100')'''
            s3.jquery_ready.append(script)

            crud_form = S3SQLCustomForm(
                "organisation_id",
                "name",
                "code",
                "description",
                "status_id",
                "start_date",
                "end_date",
                "calendar",
                #"drr.hfa",
                #"objectives",
                "human_resource_id",
                # Activities
                S3SQLInlineComponent("location",
                                     label = T("Locations"),
                                     fields = [("", "location_id")],
                                     ),
                # Partner Orgs
                S3SQLInlineComponent("organisation",
                                     name = "partner",
                                     label = T("Partner Organizations"),
                                     fields = ["organisation_id",
                                               "comments", # NB This is labelled 'Role' in DRRPP
                                               ],
                                     filterby = dict(field = "role",
                                                     options = 2),
                                     ),
                S3SQLInlineComponentCheckbox("activity_type",
                                             label = T("Categories"),
                                             field = "activity_type_id",
                                             cols = 3,
                                             # Filter Activity Type by Project
                                             filter = {"linktable": "project_activity_type_project",
                                                       "lkey": "project_id",
                                                       "rkey": "activity_type_id",
                                                       },
                                             ),
                #"budget",
                #"currency",
                "comments",
            )

            from s3 import S3TextFilter, S3OptionsFilter, S3LocationFilter, S3DateFilter
            filter_widgets = [
                S3TextFilter(["name",
                              "code",
                              "description",
                              "organisation.name",
                              "organisation.acronym",
                              ],
                             label = T("Name"),
                             _class = "filter-search",
                             ),
                S3OptionsFilter("status_id",
                                label = T("Status"),
                                # Not translateable
                                #represent = "%(name)s",
                                cols = 3,
                                ),
                #S3OptionsFilter("theme_project.theme_id",
                #                label = T("Theme"),
                #                #hidden = True,
                #                ),
                S3LocationFilter("location.location_id",
                                 label = T("Location"),
                                 levels = ("L1", "L2", "L3", "L4"),
                                 #hidden = True,
                                 ),
                # @ToDo: Widget to handle Start & End in 1!
                S3DateFilter("start_date",
                             label = T("Start Date"),
                             hide_time = True,
                             #hidden = True,
                             ),
                S3DateFilter("end_date",
                             label = T("End Date"),
                             hide_time = True,
                             #hidden = True,
                             ),
                ]

            list_fields = ["id",
                           "name",
                           "code",
                           "organisation_id",
                           "start_date",
                           "end_date",
                           (T("Locations"), "location.location_id"),
                           ]

            s3db.configure(tablename,
                           crud_form = crud_form,
                           filter_widgets = filter_widgets,
                           list_fields = list_fields,
                           )
        return result
    s3.prep = custom_prep

    return attr

settings.customise_project_project_controller = customise_project_project_controller
# -----------------------------------------------------------------------------
def facility_marker_fn(record):
    """
        Function to decide which Marker to use in Facility Map
    """

    db = current.db
    s3db = current.s3db
    table = db.org_facility_type
    ltable = db.org_site_facility_type
    query = (ltable.site_id == record.site_id) & \
            (ltable.facility_type_id == table.id)
    rows = db(query).select(table.name)
    types = [row.name for row in rows]

    # Use Marker in preferential order
    if "Hub" in types:
        marker = "warehouse"
    elif "Medical Clinic" in types:
        marker = "hospital"
    elif "Food" in types:
        marker = "food"
    else:
        # Unknown
        marker = "office"

    if settings.has_module("req"):
        # Colour code by open requests (show highest priority)
        reqs = record.reqs
        if reqs == 3:
            # High
            marker = "%s_red" % marker
        elif reqs == 2:
            # Medium
            marker = "%s_yellow" % marker
        elif reqs == 1:
            # Low
            marker = "%s_green" % marker

    # Lookup the Marker
    mtable = db.gis_marker
    marker = db(mtable.name == marker).select(mtable.image,
                                              mtable.height,
                                              mtable.width,
                                              cache=s3db.cache,
                                              limitby=(0, 1)
                                              ).first()
    return marker
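# NOTE (illustrative sketch, not part of the original template): the marker
# logic above is an ordinary priority cascade; the hypothetical helper below
# exercises the same decisions without any database lookups.
def _demo_choose_marker(types, reqs=None):
    # Same preferential order as facility_marker_fn, minus the DB access
    if "Hub" in types:
        marker = "warehouse"
    elif "Medical Clinic" in types:
        marker = "hospital"
    else:
        # Unknown
        marker = "office"
    if reqs == 1:
        # Low-priority open requests turn the marker green
        marker = "%s_green" % marker
    return marker

# e.g. _demo_choose_marker(["Hub"], reqs=1) -> "warehouse_green"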
# -----------------------------------------------------------------------------
def org_facility_onvalidation(form):
    """
        Default the Facility Name to the Street Address
    """

    form_vars = form.vars
    name = form_vars.get("name", None)
    if name:
        return
    address = form_vars.get("address", None)
    if address:
        form_vars.name = address
    else:
        # We need a default
        form_vars.name = current.db.org_facility.location_id.represent(form_vars.location_id)

# -----------------------------------------------------------------------------
def customise_org_facility_controller(**attr):

    s3db = current.s3db
    s3 = current.response.s3

    # Tell the client to request per-feature markers
    s3db.configure("org_facility", marker_fn=facility_marker_fn)

    # Custom PreP
    standard_prep = s3.prep
    def custom_prep(r):
        # Call standard prep
        if callable(standard_prep):
            result = standard_prep(r)
            if not result:
                return False
        else:
            result = True

        if r.method not in ("read", "update"):
            # Filter out Private Residences
            from s3 import FS
            s3.filter = FS("site_facility_type.facility_type_id$name") != "Private Residence"

        if r.interactive:
            tablename = "org_facility"
            table = s3db.org_facility

            if not r.component:
                from s3 import IS_LOCATION_SELECTOR2, S3LocationSelectorWidget2, S3MultiSelectWidget
                field = table.location_id
                if r.method in ("create", "update"):
                    field.label = "" # Gets replaced by widget
                levels = ("L2", "L3")
                field.requires = IS_LOCATION_SELECTOR2(levels=levels)
                field.widget = S3LocationSelectorWidget2(levels=levels,
                                                         hide_lx=False,
                                                         reverse_lx=True,
                                                         show_address=True,
                                                         show_postcode=True,
                                                         )
                table.organisation_id.widget = S3MultiSelectWidget(multiple=False)

            if r.get_vars.get("format", None) == "popup":
                # Coming from req/create form
                # Hide most Fields
                from s3 import S3SQLCustomForm, S3SQLInlineComponent
                # The Name is required, but will be defaulted from the Address
                # by org_facility_onvalidation, so don't enforce it here
                table.name.notnull = False
                table.name.requires = None
                crud_form = S3SQLCustomForm(
                    S3SQLInlineComponent("site_facility_type",
                                         label = T("Facility Type"),
                                         fields = [("", "facility_type_id")],
                                         multiple = False,
                                         required = True,
                                         ),
                    "name",
                    "location_id",
                )
                s3db.configure(tablename,
                               crud_form = crud_form,
                               onvalidation = org_facility_onvalidation,
                               )
        return result
    s3.prep = custom_prep

    return attr

settings.customise_org_facility_controller = customise_org_facility_controller
# -----------------------------------------------------------------------------
def customise_org_organisation_resource(r, tablename):

    from gluon.html import DIV, INPUT
    from s3 import S3LocationFilter, S3OptionsFilter, S3TextFilter, \
                   S3SQLCustomForm, S3SQLInlineLink, S3SQLInlineComponent, \
                   S3SQLInlineComponentMultiSelectWidget
    # activate hierarchical org_service:
    #from s3 import S3HierarchyFilter
    s3db = current.s3db

    # Should the RSS Feed of this Org be imported?
    if r.tablename == "org_organisation":
        if r.id:
            # Update form
            ctable = s3db.pr_contact
            query = (ctable.pe_id == r.record.pe_id) & \
                    (ctable.contact_method == "RSS") & \
                    (ctable.deleted == False)
            rss = current.db(query).select(ctable.poll,
                                           limitby=(0, 1)
                                           ).first()
            if rss and not rss.poll:
                # Remember that we don't wish to import
                rss_import = "on"
            else:
                # Default
                rss_import = None
        else:
            # Create form: Default
            rss_import = None
    else:
        # Component
        if r.component_id:
            # Update form
            db = current.db
            otable = s3db.org_organisation
            org = db(otable.id == r.component_id).select(otable.pe_id,
                                                         limitby=(0, 1)
                                                         ).first()
            try:
                pe_id = org.pe_id
            except:
                current.log.error("Org %s not found: cannot set rss_import correctly" % r.component_id)
                rss_import = None
            else:
                ctable = s3db.pr_contact
                query = (ctable.pe_id == pe_id) & \
                        (ctable.contact_method == "RSS") & \
                        (ctable.deleted == False)
                rss = db(query).select(ctable.poll,
                                       limitby=(0, 1)
                                       ).first()
                if rss and not rss.poll:
                    # Remember that we don't wish to import
                    rss_import = "on"
                else:
                    # Default
                    rss_import = None
        else:
            # Create form: Default
            rss_import = None

    crud_form = S3SQLCustomForm(
        "name",
        "acronym",
        S3SQLInlineLink("organisation_type",
                        field = "organisation_type_id",
                        label = T("Type"),
                        multiple = False,
                        #widget = "hierarchy",
                        ),
        S3SQLInlineComponentMultiSelectWidget("service",
                                              label = T("Services"),
                                              field = "service_id",
                                              # activate hierarchical org_service:
                                              #leafonly = False,
                                              #widget = "hierarchy",
                                              #cols = 5,
                                              ),
        S3SQLInlineComponentMultiSelectWidget("location",
                                              label = T("Neighborhoods Served"),
                                              field = "location_id",
                                              ),
        S3SQLInlineComponent("address",
                             label = T("Address"),
                             multiple = False,
                             # This is just Text - put into the Comments box for now
                             # Ultimately should go into location_id$addr_street
                             fields = [("", "comments")],
                             ),
        "phone",
        S3SQLInlineComponent("contact", name="phone2", label=T("Phone2"),
                             multiple=False, fields=[("", "value")],
                             filterby=dict(field="contact_method", options="WORK_PHONE")),
        S3SQLInlineComponent("contact", name="email", label=T("Email"),
                             multiple=False, fields=[("", "value")],
                             filterby=dict(field="contact_method", options="EMAIL")),
        "website",
        S3SQLInlineComponent("contact",
                             comment=DIV(INPUT(_type="checkbox",
                                               _name="rss_no_import",
                                               value=rss_import,
                                               ),
                                         T("Don't Import Feed")),
                             name="rss", label=T("RSS"),
                             multiple=False, fields=[("", "value")],
                             filterby=dict(field="contact_method", options="RSS")),
        S3SQLInlineComponent("document", name="iCal", label="iCAL",
                             multiple=False, fields=[("", "url")],
                             filterby=dict(field="name", options="iCal")),
        S3SQLInlineComponent("document", name="data", label=T("Data"),
                             multiple=False, fields=[("", "url")],
                             filterby=dict(field="name", options="Data")),
        S3SQLInlineComponent("contact", name="twitter", label=T("Twitter"),
                             multiple=False, fields=[("", "value")],
                             filterby=dict(field="contact_method", options="TWITTER")),
        S3SQLInlineComponent("contact", name="facebook", label=T("Facebook"),
                             multiple=False, fields=[("", "value")],
                             filterby=dict(field="contact_method", options="FACEBOOK")),
        "comments",
    )

    filter_widgets = [
        S3TextFilter(["name", "acronym"],
                     label = T("Name"),
                     _class = "filter-search",
                     ),
        S3OptionsFilter("group_membership.group_id",
                        label = T("Network"),
                        represent = "%(name)s",
                        #hidden = True,
                        ),
        S3LocationFilter("organisation_location.location_id",
                         label = T("Neighborhood"),
                         levels = ("L3", "L4"),
                         #hidden = True,
                         ),
        S3OptionsFilter("service_organisation.service_id",
                        #label = T("Service"),
                        #hidden = True,
                        ),
        # activate hierarchical org_service:
        #S3HierarchyFilter("service_organisation.service_id",
        #                  #label = T("Service"),
        #                  #hidden = True,
        #                  ),
        S3OptionsFilter("organisation_organisation_type.organisation_type_id",
                        label = T("Type"),
                        #hidden = True,
                        ),
        ]

    list_fields = ["name",
                   (T("Type"), "organisation_organisation_type.organisation_type_id"),
                   (T("Services"), "service.name"),
                   "phone",
                   (T("Email"), "email.value"),
                   "website"
                   #(T("Neighborhoods Served"), "location.name"),
                   ]

    s3db.configure("org_organisation",
                   crud_form = crud_form,
                   filter_widgets = filter_widgets,
                   list_fields = list_fields,
                   )

settings.customise_org_organisation_resource = customise_org_organisation_resource
# -----------------------------------------------------------------------------
def customise_org_organisation_controller(**attr):

    s3db = current.s3db
    s3 = current.response.s3

    # Custom prep
    standard_prep = s3.prep
    def custom_prep(r):
        # Call standard prep
        if callable(standard_prep):
            result = standard_prep(r)
            if not result:
                return False
        else:
            result = True

        if r.interactive:
            if r.component_name == "facility":
                if r.method in (None, "create", "update"):
                    from s3 import IS_LOCATION_SELECTOR2, S3LocationSelectorWidget2
                    table = s3db.org_facility
                    field = table.location_id
                    field.label = "" # Gets replaced by widget
                    levels = ("L2", "L3")
                    field.requires = IS_LOCATION_SELECTOR2(levels=levels)
                    field.widget = S3LocationSelectorWidget2(levels=levels,
                                                             hide_lx=False,
                                                             reverse_lx=True,
                                                             show_address=True,
                                                             show_postcode=True,
                                                             )
            elif r.component_name == "human_resource":
                # Don't assume that all Staff are from same org/site as Contacts they create
                r.component.table.site_id.default = None
        return result
    s3.prep = custom_prep

    # Custom postp
    standard_postp = s3.postp
    def custom_postp(r, output):
        # Call standard postp
        if callable(standard_postp):
            output = standard_postp(r, output)

        if r.interactive and isinstance(output, dict):
            if "rheader" in output:
                # Custom Tabs
                tabs = [(T("Basic Details"), None),
                        (T("Contacts"), "human_resource"),
                        (T("Facilities"), "facility"),
                        (T("Projects"), "project"),
                        (T("Assets"), "asset"),
                        ]
                output["rheader"] = s3db.org_rheader(r, tabs=tabs)
        return output
    s3.postp = custom_postp

    return attr

settings.customise_org_organisation_controller = customise_org_organisation_controller
# -----------------------------------------------------------------------------
def customise_org_group_controller(**attr):

    s3db = current.s3db
    s3 = current.response.s3

    # Custom prep
    standard_prep = s3.prep
    def custom_prep(r):
        # Call standard prep
        if callable(standard_prep):
            result = standard_prep(r)
            if not result:
                return False
        else:
            result = True

        if not r.component:
            table = s3db.org_group
            list_fields = ["name",
                           "website",
                           "meetings",
                           ]
            s3db.configure("org_group",
                           list_fields = list_fields,
                           )

            if r.interactive:
                from gluon import IS_EMPTY_OR
                from gluon.html import DIV, INPUT
                from s3 import IS_LOCATION_SELECTOR2, S3LocationSelectorWidget2, \
                               S3SQLCustomForm, S3SQLInlineComponent

                field = table.location_id
                field.label = "" # Gets replaced by widget
                #field.requires = IS_LOCATION_SELECTOR2(levels = ("L2",))
                field.requires = IS_EMPTY_OR(
                                    IS_LOCATION_SELECTOR2(levels = ("L2",))
                                    )
                field.widget = S3LocationSelectorWidget2(levels = ("L2",),
                                                         points = True,
                                                         polygons = True,
                                                         )
                # Default location to Manhattan
                db = current.db
                gtable = db.gis_location
                query = (gtable.name == "New York") & \
                        (gtable.level == "L2")
                manhattan = db(query).select(gtable.id,
                                             limitby=(0, 1)).first()
                if manhattan:
                    field.default = manhattan.id

                table.mission.readable = table.mission.writable = True
                table.meetings.readable = table.meetings.writable = True

                if r.id:
                    # Update form: did we previously opt out of importing the Feed?
                    ctable = s3db.pr_contact
                    query = (ctable.pe_id == r.record.pe_id) & \
                            (ctable.contact_method == "RSS") & \
                            (ctable.deleted == False)
                    rss = db(query).select(ctable.poll,
                                           limitby=(0, 1)).first()
                    if rss and not rss.poll:
                        rss_import = "on"
                    else:
                        # Default
                        rss_import = None
                else:
                    # Create form: Default
                    rss_import = None

                crud_form = S3SQLCustomForm(
                    "name",
                    "mission",
                    "location_id",
                    S3SQLInlineComponent("contact", name="phone", label=T("Phone"),
                                         multiple=False, fields=[("", "value")],
                                         filterby=dict(field="contact_method", options="WORK_PHONE")),
                    S3SQLInlineComponent("contact", name="email", label=T("Email"),
                                         multiple=False, fields=[("", "value")],
                                         filterby=dict(field="contact_method", options="EMAIL")),
                    "website",
                    S3SQLInlineComponent("contact",
                                         comment=DIV(INPUT(_type="checkbox",
                                                           _name="rss_no_import",
                                                           value=rss_import,
                                                           ),
                                                     T("Don't Import Feed")),
                                         name="rss", label=T("RSS"),
                                         multiple=False, fields=[("", "value")],
                                         filterby=dict(field="contact_method", options="RSS")),
                    S3SQLInlineComponent("document", name="media",
                                         label=T("URLs (media, fundraising, website, social media, etc."),
                                         fields=[("", "url")]),
                    S3SQLInlineComponent("contact", name="twitter", label=T("Twitter"),
                                         multiple=False, fields=[("", "value")],
                                         filterby=dict(field="contact_method", options="TWITTER")),
                    S3SQLInlineComponent("contact", name="facebook", label=T("Facebook"),
                                         multiple=False, fields=[("", "value")],
                                         filterby=dict(field="contact_method", options="FACEBOOK")),
                    "meetings",
                    "comments",
                    postprocess = pr_contact_postprocess,
                )
                s3db.configure("org_group",
                               crud_form = crud_form,
                               )

        elif r.component_name == "organisation":
            # Add Network Status to the list of Organisations
            list_fields = s3db.get_config("org_organisation", "list_fields")
            list_fields.insert(1, "group_membership.status_id")
            if r.interactive:
                from s3 import S3MultiSelectWidget
                mtable = s3db.org_group_membership
                mtable.group_id.widget = S3MultiSelectWidget(multiple=False)
                mtable.status_id.widget = S3MultiSelectWidget(multiple=False,
                                                              create=dict(c="org",
                                                                          f="group_membership_status",
                                                                          label=str(T("Add New Status")),
                                                                          parent="group_membership",
                                                                          child="status_id"
                                                                          ))
        return result
    s3.prep = custom_prep

    if current.auth.s3_logged_in():
        # Allow components with components (such as org/group) to breakout from tabs
        attr["native"] = True

    return attr

settings.customise_org_group_controller = customise_org_group_controller
# -----------------------------------------------------------------------------
# Persons
# Uncomment to hide fields in S3AddPersonWidget
settings.pr.request_dob = False
settings.pr.request_gender = False
# (form fails to submit)
#settings.pr.select_existing = False
settings.pr.show_emergency_contacts = False

def customise_pr_person_controller(**attr):

    s3 = current.response.s3

    # Custom prep
    standard_prep = s3.prep
    def custom_prep(r):
        # Call standard prep
        if callable(standard_prep):
            result = standard_prep(r)
        else:
            result = True

        s3db = current.s3db
        #if r.method == "validate":
        #    # Can't validate image without the file
        #    image_field = s3db.pr_image.image
        #    image_field.requires = None

        if r.interactive or r.representation == "aadata":
            if not r.component:
                hr_fields = ["organisation_id",
                             "job_title_id",
                             "site_id",
                             ]
                if r.method in ("create", "update"):
                    get_vars = r.get_vars
                    # Context from a Profile page?
                    organisation_id = get_vars.get("(organisation)", None)
                    if organisation_id:
                        field = s3db.hrm_human_resource.organisation_id
                        field.default = organisation_id
                        field.readable = field.writable = False
                        hr_fields.remove("organisation_id")
                    site_id = get_vars.get("(site)", None)
                    if site_id:
                        field = s3db.hrm_human_resource.site_id
                        field.default = site_id
                        field.readable = field.writable = False
                        hr_fields.remove("site_id")
                    else:
                        s3db.hrm_human_resource.site_id.default = None

                # ImageCrop widget doesn't currently work within an Inline Form
                #image_field = s3db.pr_image.image
                #from gluon.validators import IS_IMAGE
                #image_field.requires = IS_IMAGE()
                #image_field.widget = None

                from s3 import S3SQLCustomForm, S3SQLInlineComponent
                s3_sql_custom_fields = ["first_name",
                                        #"middle_name",
                                        "last_name",
                                        S3SQLInlineComponent("human_resource",
                                                             label = "",
                                                             fields = hr_fields,
                                                             multiple = False,
                                                             ),
                                        #S3SQLInlineComponent(
                                        #    "image",
                                        #    name = "image",
                                        #    label = T("Photo"),
                                        #    multiple = False,
                                        #    fields = [("", "image")],
                                        #    filterby = dict(field = "profile",
                                        #                    options=[True]
                                        #                    )
                                        #    ),
                                        ]
                list_fields = [(current.messages.ORGANISATION, "human_resource.organisation_id"),
                               "first_name",
                               "last_name",
                               ]
                # Don't include Email/Phone for unauthenticated users
                if current.auth.is_logged_in():
                    MOBILE = settings.get_ui_label_mobile_phone()
                    EMAIL = T("Email")
                    list_fields += [(MOBILE, "phone.value"),
                                    (EMAIL, "email.value"),
                                    ]
                    s3_sql_custom_fields.insert(3,
                                                S3SQLInlineComponent("contact",
                                                                     name = "phone",
                                                                     label = MOBILE,
                                                                     multiple = False,
                                                                     fields = [("", "value")],
                                                                     filterby = dict(field = "contact_method",
                                                                                     options = "SMS")),
                                                )
                    s3_sql_custom_fields.insert(3,
                                                S3SQLInlineComponent("contact",
                                                                     name = "email",
                                                                     label = EMAIL,
                                                                     multiple = False,
                                                                     fields = [("", "value")],
                                                                     filterby = dict(field = "contact_method",
                                                                                     options = "EMAIL")),
                                                )
                crud_form = S3SQLCustomForm(*s3_sql_custom_fields)
                s3db.configure(r.tablename,
                               crud_form = crud_form,
                               list_fields = list_fields,
                               )
        return result
    s3.prep = custom_prep

    # Custom postp
    standard_postp = s3.postp
    def custom_postp(r, output):
        # Call standard postp
        if callable(standard_postp):
            output = standard_postp(r, output)

        if r.interactive and isinstance(output, dict):
            if "form" in output:
                output["form"].add_class("pr_person")
            elif "item" in output and hasattr(output["item"], "add_class"):
                output["item"].add_class("pr_person")
        return output
    s3.postp = custom_postp

    return attr

settings.customise_pr_person_controller = customise_pr_person_controller
# -----------------------------------------------------------------------------
# Groups
def chairperson(row):
    """
        Virtual Field to show the chairperson of a group
    """

    if hasattr(row, "pr_group"):
        row = row.pr_group
    try:
        group_id = row.id
    except:
        # not available
        return current.messages["NONE"]

    db = current.db
    mtable = current.s3db.pr_group_membership
    ptable = db.pr_person
    query = (mtable.group_id == group_id) & \
            (mtable.group_head == True) & \
            (mtable.person_id == ptable.id)
    chair = db(query).select(ptable.first_name,
                             ptable.middle_name,
                             ptable.last_name,
                             ptable.id,
                             limitby=(0, 1)).first()
    if chair:
        from s3 import s3_fullname
        # Only used in list view so HTML is OK
        return A(s3_fullname(chair),
                 _href=URL(c="hrm", f="person", args=chair.id))
    else:
        return current.messages["NONE"]

# -----------------------------------------------------------------------------
def customise_pr_group_controller(**attr):

    s3 = current.response.s3

    # Custom prep
    standard_prep = s3.prep
    def custom_prep(r):
        # Call standard prep
        if callable(standard_prep):
            result = standard_prep(r)
            if not result:
                return False

        s3db = current.s3db
        s3db.pr_group_membership.group_head.label = T("Group Chairperson")
        if r.component_name == "group_membership":
            from s3layouts import S3AddResourceLink
            s3db.pr_group_membership.person_id.comment = \
                S3AddResourceLink(c="pr", f="person",
                                  title=T("Create Person"),
                                  tooltip=current.messages.AUTOCOMPLETE_HELP)
        #else:
        #    # RHeader wants a simplified version, but don't want inconsistent across tabs
        #    s3db.pr_group_membership.group_head.label = T("Chairperson")
        return True
    s3.prep = custom_prep

    return attr

settings.customise_pr_group_controller = customise_pr_group_controller
# -----------------------------------------------------------------------------
def customise_pr_group_resource(r, tablename):
    """
        Customise pr_group resource (in group & org_group controllers)
        - runs after controller customisation
        - but runs before prep
    """

    s3db = current.s3db
    table = s3db.pr_group

    field = table.group_type
    field.default = 3 # Relief Team, to show up in hrm/group
    field.readable = field.writable = False
    table.name.label = T("Name")
    table.description.label = T("Description")
    table.meetings.readable = table.meetings.writable = True

    # Increase size of widget
    from s3 import s3_comments_widget
    table.description.widget = s3_comments_widget

    # Virtual Field for the Chairperson
    from gluon import Field
    table.chairperson = Field.Method("chairperson", chairperson)

    # Format for filter_widgets & imports
    s3db.add_components("pr_group",
                        org_group_team = "group_id",
                        )

    from s3 import S3SQLCustomForm, S3SQLInlineComponent, S3TextFilter
    crud_form = S3SQLCustomForm("name",
                                "description",
                                S3SQLInlineComponent("group_team",
                                                     label = T("Network"),
                                                     fields = [("", "org_group_id")],
                                                     # @ToDo: Make this optional?
                                                     multiple = False,
                                                     ),
                                "meetings",
                                "comments",
                                )

    filter_widgets = [
        S3TextFilter(["name",
                      "description",
                      "comments",
                      "group_team.org_group_id$name",
                      ],
                     label = T("Search"),
                     comment = T("You can search by group name, description or comments and by network name. You may use %% as wildcard. Press 'Search' without input to list all."),
                     ),
        ]

    # Need to re-do list_fields as get over_written by hrm_group_controller()
    list_fields = [#(T("Network"), "group_team.org_group_id"),
                   "name",
                   "description",
                   "meetings",
                   (T("Chairperson"), "chairperson"),
                   "comments",
                   ]

    s3db.configure("pr_group",
                   crud_form = crud_form,
                   filter_widgets = filter_widgets,
                   list_fields = list_fields,
                   # Redirect to member list when a new group has been created
                   create_next = URL(c="hrm", f="group",
                                     args=["[id]", "group_membership"]),
                   )

settings.customise_pr_group_resource = customise_pr_group_resource
# -----------------------------------------------------------------------------
def pr_contact_postprocess(form):
    """
        Import Organisation/Network RSS Feeds
    """

    s3db = current.s3db
    form_vars = form.vars
    rss_url = form_vars.rsscontact_i_value_edit_0 or \
              form_vars.rsscontact_i_value_edit_none
    if not rss_url:
        if form.record:
            # Update form: read the old value out of the inline-component JSON
            import json
            old_rss = form.record.sub_rsscontact
            data = json.loads(old_rss)["data"]
            if data:
                # RSS feed is being deleted, so we should disable it
                old_rss = data[0]["value"]["value"]
                table = s3db.msg_rss_channel
                old = current.db(table.url == old_rss).select(table.channel_id,
                                                              table.enabled,
                                                              limitby = (0, 1)
                                                              ).first()
                if old and old.enabled:
                    s3db.msg_channel_disable("msg_rss_channel", old.channel_id)
                return
        else:
            # Nothing to do :)
            return

    # Name the Feed after the Org/Network
    name = form_vars.name
    no_import = current.request.post_vars.get("rss_no_import", None)

    db = current.db
    table = s3db.msg_rss_channel
    # Check if we already have a channel for this Contact
    name_exists = db(table.name == name).select(table.id,
                                                table.channel_id,
                                                table.enabled,
                                                table.url,
                                                limitby = (0, 1)
                                                ).first()
    if name_exists:
        if name_exists.url == rss_url:
            # No change to either Contact Name or URL
            if no_import:
                if name_exists.enabled:
                    # Disable channel (& associated parsers)
                    s3db.msg_channel_disable("msg_rss_channel",
                                             name_exists.channel_id)
                return
            elif name_exists.enabled:
                # Nothing to do :)
                return
            else:
                # Enable channel (& associated parsers)
                s3db.msg_channel_enable("msg_rss_channel",
                                        name_exists.channel_id)
                return

        # Check if we already have a channel for this URL
        url_exists = db(table.url == rss_url).select(table.id,
                                                     table.channel_id,
                                                     table.enabled,
                                                     limitby = (0, 1)
                                                     ).first()
        if url_exists:
            # We have 2 feeds: 1 for the Name & 1 for the URL
            # Disable the old Contact one and link the URL one to this Contact
            # and ensure active or not as appropriate
            s3db.msg_channel_disable("msg_rss_channel",
                                     name_exists.channel_id)
            url_exists.update_record(name=name)
            if no_import:
                if url_exists.enabled:
                    s3db.msg_channel_disable("msg_rss_channel",
                                             url_exists.channel_id)
                return
            elif url_exists.enabled:
                # Nothing to do :)
                return
            else:
                s3db.msg_channel_enable("msg_rss_channel",
                                        url_exists.channel_id)
                return
        else:
            # Update the URL
            name_exists.update_record(url=rss_url)
            if no_import:
                if name_exists.enabled:
                    # Disable channel (& associated parsers)
                    s3db.msg_channel_disable("msg_rss_channel",
                                             name_exists.channel_id)
                return
            elif name_exists.enabled:
                # Nothing to do :)
                return
            else:
                s3db.msg_channel_enable("msg_rss_channel",
                                        name_exists.channel_id)
                return
    else:
        # Check if we already have a channel for this URL
        url_exists = db(table.url == rss_url).select(table.id,
                                                     table.channel_id,
                                                     table.enabled,
                                                     limitby = (0, 1)
                                                     ).first()
        if url_exists:
            # Either Contact has changed Name or this feed is associated with
            # another Contact
            # - update Feed name
            url_exists.update_record(name=name)
            if no_import:
                if url_exists.enabled:
                    # Disable channel (& associated parsers)
                    s3db.msg_channel_disable("msg_rss_channel",
                                             url_exists.channel_id)
                return
            elif url_exists.enabled:
                # Nothing to do :)
                return
            else:
                # Enable channel (& associated parsers)
                s3db.msg_channel_enable("msg_rss_channel",
                                        url_exists.channel_id)
                return
        elif no_import:
            # Nothing to do :)
            return
        #else:
        #    # Create a new Feed
        #    pass

    # Add RSS Channel
    _id = table.insert(name=name, url=rss_url)
    record = dict(id=_id)
    s3db.update_super(table, record)
    # Enable
    channel_id = record["channel_id"]
    s3db.msg_channel_enable("msg_rss_channel", channel_id)
    # Setup Parser
    table = s3db.msg_parser
    _id = table.insert(channel_id=channel_id,
                       function_name="parse_rss",
                       enabled=True)
    s3db.msg_parser_enable(_id)
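# NOTE (illustrative sketch, not part of the original template): the feed
# handling above reduces to a small decision table over "is the name known?",
# "is the URL known?", "is the URL unchanged?" and the "Don't Import Feed"
# checkbox. The hypothetical helper below mirrors that branch structure.
def _demo_feed_action(name_known, url_known, same_url, no_import):
    if name_known:
        if same_url:
            return "disable" if no_import else "enable"
        if url_known:
            # Two feeds exist: disable the name's channel, repoint the URL's
            return "merge onto url channel"
        return "update url, then enable/disable"
    if url_known:
        # Name changed, or feed belongs to another Contact
        return "update name, then enable/disable"
    return "noop" if no_import else "create channel + parser"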
# -----------------------------------------------------------------------------
# Inventory Management
# Uncomment to customise the label for Facilities in Inventory Management
#settings.inv.facility_label = "Facility"
# Uncomment if you need a simpler (but less accountable) process for managing stock levels
#settings.inv.direct_stock_edits = True
# Uncomment to call Stock Adjustments, 'Stock Counts'
settings.inv.stock_count = True
# Uncomment to not track pack values
settings.inv.track_pack_values = False
settings.inv.send_show_org = False
# -----------------------------------------------------------------------------
# Requests Management
settings.req.req_type = ["People", "Stock"]#, "Summary"]
settings.req.prompt_match = False
#settings.req.use_commit = False
settings.req.requester_optional = True
settings.req.date_writable = False
settings.req.item_quantities_writable = True
settings.req.skill_quantities_writable = True
settings.req.items_ask_purpose = False
#settings.req.use_req_number = False
# Label for Requester
settings.req.requester_label = "Site Contact"
# Filter Requester as being from the Site
settings.req.requester_from_site = True
# Label for Inventory Requests
settings.req.type_inv_label = "Supplies"
# Uncomment to enable Summary 'Site Needs' tab for Offices/Facilities
settings.req.summary = True
# -----------------------------------------------------------------------------
def req_req_postprocess(form):
    """
        Create a cms_post in the newswire for each new Request
        - @ToDo: Send out Tweets
    """

    req_id = form.vars.id

    db = current.db
    s3db = current.s3db
    rtable = s3db.req_req

    # Read the full record
    row = db(rtable.id == req_id).select(rtable.type,
                                         rtable.site_id,
                                         rtable.requester_id,
                                         rtable.priority,
                                         rtable.date_required,
                                         rtable.purpose,
                                         rtable.comments,
                                         limitby=(0, 1)
                                         ).first()

    # Build Title & Body from the Request details
    priority = rtable.priority.represent(row.priority)
    if row.date_required:
        date = rtable.date_required.represent(row.date_required)
        title = "%(priority)s by %(date)s" % dict(priority=priority,
                                                  date=date)
    else:
        title = priority
    body = row.comments
    if row.purpose:
        body = "%s\n%s" % (row.purpose, body)
    if row.type == 1:
        # Items
        ritable = s3db.req_req_item
        items = db(ritable.req_id == req_id).select(ritable.item_id,
                                                    ritable.item_pack_id,
                                                    ritable.quantity)
        item_represent = s3db.supply_item_represent
        pack_represent = s3db.supply_item_pack_represent
        for item in items:
            item = "%s %s %s" % (item.quantity,
                                 pack_represent(item.item_pack_id),
                                 item_represent(item.item_id))
            body = "%s\n%s" % (item, body)
    else:
        # People
        rstable = s3db.req_req_skill
        skills = db(rstable.req_id == req_id).select(rstable.skill_id,
                                                     rstable.quantity)
        skill_represent = s3db.hrm_multi_skill_represent
        for skill in skills:
            item = "%s %s" % (skill.quantity, skill_represent(skill.skill_id))
            body = "%s\n%s" % (item, body)

    # Lookup series_id
    stable = s3db.cms_series
    try:
        series_id = db(stable.name == "Request").select(stable.id,
                                                        cache=s3db.cache,
                                                        limitby=(0, 1)
                                                        ).first().id
    except:
        # Prepop hasn't been run
        series_id = None

    # Location is that of the site
    otable = s3db.org_site
    location_id = db(otable.site_id == row.site_id).select(otable.location_id,
                                                           limitby=(0, 1)
                                                           ).first().location_id
    # Create Post
    ptable = s3db.cms_post
    _id = ptable.insert(series_id=series_id,
                        title=title,
                        body=body,
                        location_id=location_id,
                        person_id=row.requester_id,
                        )
    record = dict(id=_id)
    s3db.update_super(ptable, record)

    # Add source link
    url = "%s%s" % (settings.get_base_public_url(),
                    URL(c="req", f="req", args=req_id))
    s3db.doc_document.insert(doc_id=record["doc_id"],
                             url=url,
                             )

# -----------------------------------------------------------------------------
def customise_req_req_resource(r, tablename):

    from s3layouts import S3AddResourceLink
    current.s3db.req_req.site_id.comment = \
        S3AddResourceLink(c="org", f="facility",
                          vars = dict(child="site_id"),
                          title=T("Create Facility"),
                          tooltip=current.messages.AUTOCOMPLETE_HELP)
    current.response.s3.req_req_postprocess = req_req_postprocess

    if not r.component and r.method in (None, "create", "update"):
        # Keep the 'Add Person' popup in-context with the selected Site
        script = \
'''$('#req_req_site_id').change(function(){
 var url=$('#person_add').attr('href')
 url=url.split('?')
 var q=S3.queryString.parse(url[1])
 q['(site)']=$(this).val()
 url=url[0]+'?'+S3.queryString.stringify(q)
 $('#person_add').attr('href',url)})'''
        current.response.s3.jquery_ready.append(script)

settings.customise_req_req_resource = customise_req_req_resource
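# NOTE (illustrative sketch, not part of the original template): the Title
# built in req_req_postprocess is plain %-dict formatting; the hypothetical
# helper below shows the two shapes it can take.
def _demo_req_title(priority, date=None):
    # e.g. _demo_req_title("High", "04-27-2014") -> "High by 04-27-2014"
    if date:
        return "%(priority)s by %(date)s" % dict(priority=priority, date=date)
    return priority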
settings.auth.registration_requires_verification = True # Do new users", "\"project_activity_type_project\", \"lkey\": \"project_id\", \"rkey\": \"activity_type_id\", }, ), #\"budget\", #\"currency\", \"comments\",", "name_nice = T(\"Ticket Viewer\"), #description = \"Needed for Breadcrumbs\", restricted", "tabs # s3db.pr_group_membership.group_head.label = T(\"Chairperson\") return True s3.prep = custom_prep", "), \"meetings\", \"comments\", ) filter_widgets = [ S3TextFilter([\"name\", \"description\", \"comments\",", "False table.name.label = T(\"Name\") table.description.label = T(\"Description\") table.meetings.readable = table.meetings.writable", "the label of \"Teams\" to \"Groups\" settings.hrm.teams = \"Groups\" #", "in rows] # Use Marker in preferential order if \"Hub\"", "True) & \\ (mtable.person_id == ptable.id) chair = db(query).select(ptable.first_name, ptable.middle_name,", "1, )), (\"project\", Storage( name_nice = T(\"Projects\"), #description = \"Tracking", "settings.customise_pr_person_controller = customise_pr_person_controller # ----------------------------------------------------------------------------- # Groups def chairperson(row): \"\"\"", "#settings.hrm.use_skills = False # Uncomment to disable the use of", "Stock Adjustments, 'Stock Counts' settings.inv.stock_count = True # Uncomment to", "Representations settings.org.site_autocomplete_fields = (\"organisation_id$name\", \"location_id$addr_street\", ) # Uncomment to hide", "attr settings.customise_org_group_controller = customise_org_group_controller # ----------------------------------------------------------------------------- # Persons # Uncomment", "except: # Python 2.6 from gluon.contrib.simplejson.ordered_dict import OrderedDict from gluon", "3, )), #(\"vol\", Storage( # name_nice = T(\"Volunteers\"), # #description", "isinstance(output, dict): if \"rheader\" in output: # Custom Tabs tabs", "Type by Project filter = {\"linktable\": \"project_activity_type_project\", \"lkey\": \"project_id\", \"rkey\":", "settings.req.summary = True # ----------------------------------------------------------------------------- def req_req_postprocess(form): \"\"\" Runs after", "\"Situation Awareness & Geospatial Analysis\", restricted = True, module_type =", "in skills: item = \"%s %s\" % (skill.quantity, skill_represent(skill.skill_id)) body", "f=\"person\", args=chair.id)) else: return current.messages[\"NONE\"] # ----------------------------------------------------------------------------- def customise_pr_group_controller(**attr): s3", "Coming from req/create form # Hide most Fields from s3", "= S3SQLCustomForm(\"person_id\", \"organisation_id\", \"site_id\", S3SQLInlineComponent( \"group_person\", label = T(\"Network\"), link", "person controllers) - runs after controller customisation - but runs", "now # Ultimately should go into location_id$addr_street fields = [(\"\",", "S3SQLInlineComponentCheckbox( \"activity_type\", label = T(\"Categories\"), field = \"activity_type_id\", cols =", "of appropriate Resources (Human, Assets & Facilities).\", restricted = True,", "None from s3 import S3SQLCustomForm, S3SQLInlineComponent s3_sql_custom_fields = [\"first_name\", #\"middle_name\",", "default this onvalidation table.name.notnull = False table.name.requires = None crud_form", "= (0, 1) ).first() if old and old.enabled: s3db.msg_channel_disable(\"msg_rss_channel\", old.channel_id)", "user-visible functionality of this module isn't normally required. 
Rather it's", "s3layouts import S3AddResourceLink s3db.pr_group_membership.person_id.comment = \\ S3AddResourceLink(c=\"pr\", f=\"person\", title=T(\"Create Person\"),", "row.site_id).select(otable.location_id, limitby=(0, 1) ).first().location_id # Create Post ptable = s3db.cms_post", "query = (ctable.pe_id == pe_id) & \\ (ctable.contact_method == \"RSS\")", "= s3db.hrm_human_resource.organisation_id field.default = organisation_id field.readable = field.writable = False", "name_exists.channel_id) return # Check if we already have a channel", "display in Representations settings.org.site_autocomplete_fields = (\"organisation_id$name\", \"location_id$addr_street\", ) # Uncomment", "T(\"Data\"), multiple = False, fields = [(\"\", \"url\")], filterby =", "\"update\"): script = \\ '''$('#req_req_site_id').change(function(){ var url=$('#person_add').attr('href') url=url.split('?') var q=S3.queryString.parse(url[1])", "the old Contact one and link the URL one to", "replaced by widget levels = (\"L2\", \"L3\") field.requires = IS_LOCATION_SELECTOR2(levels=levels)", "IS_IMAGE #image_field.requires = IS_IMAGE() #image_field.widget = None from s3 import", "T(\"Network\"), fields = [(\"\", \"group_id\"), (\"\", \"status_id\"), ], ), S3SQLInlineComponent(", "disable Staff experience settings.hrm.staff_experience = False # Uncomment to disable", "== ptable.id) chair = db(query).select(ptable.first_name, ptable.middle_name, ptable.last_name, ptable.id, limitby=(0, 1)).first()", "else: result = True s3db = current.s3db #if r.method ==", "just for specific Organisations # empty list => disabled for", "gluon.validators import IS_IMAGE #image_field.requires = IS_IMAGE() #image_field.widget = None from", "pack values settings.inv.track_pack_values = False settings.inv.send_show_org = False # Types", "to member list when a new group has been created", "current.request.post_vars.get(\"rss_no_import\", None) if name_exists: if name_exists.url == rss_url: # No", "mtable = db.gis_marker try: marker = db(mtable.name == marker).select(mtable.image, mtable.height,", "standard_prep(r) else: result = True if not r.component: table =", "create field.default = None # Use a hierarchical dropdown instead", "Site settings.req.requester_from_site = True # Label for Inventory Requests settings.req.type_inv_label", "put into the Comments box for now # Ultimately should", ")), (\"appadmin\", Storage( name_nice = T(\"Administration\"), #description = \"Site Administration\",", "= URL(c=\"hrm\", f=\"group\", args=[\"[id]\", \"group_membership\"]), ) settings.customise_pr_group_resource = customise_pr_group_resource #", "True, module_type = None # No Menu )), (\"errors\", Storage(", "result = True if r.interactive or r.representation == \"aadata\": if", "form.vars.id db = current.db s3db = current.s3db rtable = s3db.req_req", "= False # Label for Requester settings.req.requester_label = \"Site Contact\"", "s3db.msg_channel_enable(\"msg_rss_channel\", url_exists.channel_id) return elif no_import: # Nothing to do :)", "= \"ZIP Code\" # Uncomment to disable responsive behavior of", "{\"linktable\": \"project_activity_type_project\", \"lkey\": \"project_id\", \"rkey\": \"activity_type_id\", }, ), #\"budget\", #\"currency\",", "== True) & \\ (mtable.person_id == ptable.id) chair = db(query).select(ptable.first_name,", "these Assessments to the Survey module #(\"building\", Storage( # name_nice", "access = \"|1|\", # Only Administrators can see this module", ")), (\"errors\", Storage( name_nice = T(\"Ticket Viewer\"), #description = 
\"Needed", "# Human Resource Management # Uncomment to chage the label", "table.name.requires = None crud_form = S3SQLCustomForm(S3SQLInlineComponent( \"site_facility_type\", label = T(\"Facility", "# Medium marker = \"%s_yellow\" % marker elif reqs ==", "\"Needed for Breadcrumbs\", restricted = False, module_type = None #", "URL name_exists.update_record(url=rss_url) if no_import: if name_exists.enabled: # Disable channel (&", "% (settings.get_base_public_url(), URL(c=\"req\", f=\"req\", args=req_id)) s3db.doc_document.insert(doc_id=record[\"doc_id\"], url=url, ) # -----------------------------------------------------------------------------", "\"filter-search\", ), S3OptionsFilter(\"group_team.org_group_id\", label = T(\"Network\"), #hidden = True, ),", "HR Certificates settings.hrm.use_certificates = False # Uncomment to disable the", "= S3SQLCustomForm( \"name\", \"acronym\", S3SQLInlineLink( \"organisation_type\", field = \"organisation_type_id\", label", "= True, ), ] # Need to re-do list_fields as", "= True, ), S3OptionsFilter(\"group_person.group_id\", label = T(\"Network\"), #filter = True,", "= True # ----------------------------------------------------------------------------- def facility_marker_fn(record): \"\"\" Function to decide", "\"job_title_id\", \"start_date\", ) list_fields = [\"id\", \"person_id\", \"job_title_id\", \"organisation_id\", (T(\"Network\"),", "= (\"org_organisation\",) # ----------------------------------------------------------------------------- # Audit def audit_write(method, tablename, form,", "either Contact Name or URL if no_import: if name_exists.enabled: #", "= (\"L2\", \"L3\") field.requires = IS_LOCATION_SELECTOR2(levels=levels) field.widget = S3LocationSelectorWidget2(levels=levels, hide_lx=False,", "current.db mtable = current.s3db.pr_group_membership ptable = db.pr_person query = (mtable.group_id", "= T(\"Network\"), fields = [(\"\", \"group_id\"), (\"\", \"status_id\"), ], ),", "query = (ltable.site_id == record.site_id) & \\ (ltable.facility_type_id == table.id)", "from Sites #settings.org.site_inv_req_tabs = True # ----------------------------------------------------------------------------- def facility_marker_fn(record): \"\"\"", "list when a new group has been created create_next =", "[(\"\", \"comments\")], ), S3SQLInlineComponentMultiSelectWidget( \"location\", label = T(\"Neighborhoods Served\"), field", "S3TextFilter # activate hierarchical org_service: #from s3 import S3LocationFilter, S3OptionsFilter,", "\"pr_group\"): row = row.pr_group try: group_id = row.id except: #", "to import rss_import = \"on\" else: # Default rss_import =", "the menu )), # Uncomment to enable internal support requests", "= customise_org_organisation_resource # ----------------------------------------------------------------------------- def customise_org_organisation_controller(**attr): s3db = current.s3db s3", "\"parse_rss\"]) # ----------------------------------------------------------------------------- # Human Resource Management # Uncomment to", "@ToDo: Widget to handle Start & End in 1! 
S3DateFilter(\"start_date\",", "False # Uncomment this to use Activities for projects settings.project.activities", "= \"Facility\" #settings.org.site_label = \"Location\" # Uncomment to show the", "# ----------------------------------------------------------------------------- # CMS # Uncomment to use Bookmarks in", "for projects settings.project.activities = True # Uncomment this to use", "[\"organisation_id\", \"comments\", # NB This is labelled 'Role' in DRRPP", "= list_fields, ) return result s3.prep = custom_prep return attr", "# Call standard postp if callable(standard_postp): output = standard_postp(r, output)", "S3SQLCustomForm(S3SQLInlineComponent( \"site_facility_type\", label = T(\"Facility Type\"), fields = [(\"\", \"facility_type_id\")],", "the Contact & 1 for the URL # Disable the", "= dict(field = \"contact_method\", options = \"WORK_PHONE\" ) ), S3SQLInlineComponent(", "Component if r.component_id: # Update form db = current.db otable", "return False settings.security.audit_write = audit_write # ----------------------------------------------------------------------------- # CMS #", "= [\"document_id\", \"name\", \"url\", \"comments\", ], filterby = dict(field =", "\"image\", # label = T(\"Photo\"), # multiple = False, #", "filterby = dict(field = \"contact_method\", options = \"EMAIL\" ) ),", "options = \"EMAIL\" ) ), \"website\", S3SQLInlineComponent( \"contact\", comment =", "to Letter settings.base.paper_size = T(\"Letter\") # Restrict the Location Selector", "other resources. Matches against Inventories where supplies are requested.\", restricted", "in hrm/group field.readable = field.writable = False table.name.label = T(\"Name\")", "Contact # and ensure active or not as appropriate #", "(T(\"Services\"), \"service.name\"), \"phone\", (T(\"Email\"), \"email.value\"), \"website\" #(T(\"Neighborhoods Served\"), \"location.name\"), ]", "settings.cms.show_tags = True # Uncomment to show post Titles in", "False # Label for Requester settings.req.requester_label = \"Site Contact\" #", "name) if name_exists.enabled: # Disable channel (& associated parsers) s3db.msg_channel_disable(\"msg_rss_channel\",", "Perform normal Audit return True else: # Don't Audit non", "to disable the use of HR Certificates settings.hrm.use_certificates = False", "URL from gluon.storage import Storage from s3 import s3_fullname T", "return attr settings.customise_pr_group_controller = customise_pr_group_controller # ----------------------------------------------------------------------------- def customise_pr_group_resource(r, tablename):", "current.s3db form_vars = form.vars rss_url = form_vars.rsscontact_i_value_edit_0 or \\ form_vars.rsscontact_i_value_edit_none", "other modules. 
module_type = None, )), (\"supply\", Storage( name_nice =", "S3SQLInlineComponent( \"organisation\", name = \"partner\", label = T(\"Partner Organizations\"), fields", "#(\"building\", Storage( # name_nice = T(\"Building Assessments\"), # #description =", "\"name\", \"url\", \"comments\", ], filterby = dict(field = \"name\") ),", "\"description\", \"meetings\", (T(\"Chairperson\"), \"chairperson\"), \"comments\", ] s3db.configure(\"pr_group\", crud_form = crud_form,", "r.component: from s3 import S3TextFilter, S3OptionsFilter, S3LocationFilter filter_widgets = [", "= T(\"Support\"), # #description = \"Support Requests\", # restricted =", "levels = (\"L1\", \"L2\", \"L3\", \"L4\"), hidden = True, ),", "T(\"Messaging\"), #description = \"Sends & Receives Alerts via Email &", "Default Country Code for telephone numbers settings.L10n.default_country_code = 1 #", "Email & SMS\", restricted = True, # The user-visible functionality", "restricted = True, module_type = None # No Menu )),", "1) ).first() if url_exists: # Either Contact has changed Name", "onvalidation = org_facility_onvalidation, ) return True s3.prep = custom_prep return", "which shouldn't be disabled (\"default\", Storage( name_nice = T(\"Home\"), restricted", "rss_import = None else: # Component if r.component_id: # Update", "settings.inv.facility_label = \"Facility\" # Uncomment if you need a simpler", "== rss_url).select(table.id, table.channel_id, table.enabled, limitby = (0, 1) ).first() if", "tab for Offices/Facilities settings.req.summary = True # ----------------------------------------------------------------------------- def req_req_postprocess(form):", "organisation & person controllers) - runs after controller customisation -", "& \\ (ctable.deleted == False) rss = db(query).select(ctable.poll, limitby=(0, 1)", "# ) # ), ] list_fields = [(current.messages.ORGANISATION, \"human_resource.organisation_id\"), \"first_name\",", "#image_field = s3db.pr_image.image #from gluon.validators import IS_IMAGE #image_field.requires = IS_IMAGE()", "table.organisation_id.writable = False table.type.readable = table.type.writable = False return result", "Default rss_import = None else: # Component if r.component_id: #", "for item in items: item = \"%s %s %s\" %", "), S3SQLInlineComponentCheckbox( \"activity_type\", label = T(\"Categories\"), field = \"activity_type_id\", cols", "(verified) user, even if the user is automatically approved #settings.auth.always_notify_approver", "Only Administrators can see this module in the default menu", "settings.cms.location_click_filters = True # Uncomment to use organisation_id instead of", "Use Marker in preferential order if \"Hub\" in types: marker", "[(\"\", \"org_group_id\")], # @ToDo: Make this optional? 
multiple = False,", "Administrators can see this module in the default menu &", "to show post Titles in Newsfeed settings.cms.show_titles = True #", "help\") script = '''$('#project_project_code').attr('maxlength','100')''' s3.jquery_ready.append(script) crud_form = S3SQLCustomForm( \"organisation_id\", \"name\",", "), S3SQLInlineComponent( \"document\", name = \"iCal\", label = \"iCAL\", multiple", "Human Resource Management # Uncomment to chage the label for", "(media, fundraising, website, social media, etc.\"), fields = [\"document_id\", \"name\",", "types: marker = \"asset\" elif \"Residential Building\" in types: marker", "= \"Allow affected individuals & households to register to receive", "= standard_prep(r) else: result = True if r.interactive or r.representation", "= current.response.s3 # Custom prep standard_prep = s3.prep def custom_prep(r):", "== req_id).select(rtable.type, rtable.site_id, rtable.requester_id, rtable.priority, rtable.date_required, rtable.purpose, rtable.comments, limitby=(0, 1)", "if r.interactive or r.representation == \"aadata\": if not r.component: hr_fields", "= \"hospital\" elif \"Food\" in types: marker = \"food\" elif", "module_type = 9, # 8th item in the menu )),", "settings.L10n.utc_offset = \"UTC -0500\" # Uncomment these to use US-style", "location_id$addr_street fields = [(\"\", \"comments\")], ), S3SQLInlineComponentMultiSelectWidget( \"location\", label =", "= True, module_type = 3, )), #(\"vol\", Storage( # name_nice", "rss_import, ), T(\"Don't Import Feed\")), name = \"rss\", label =", "False # ----------------------------------------------------------------------------- # Persons def customise_pr_person_controller(**attr): s3 = current.response.s3", "= True if not r.component and (r.interactive or r.representation ==", "= list_fields, ) settings.customise_org_organisation_resource = customise_org_organisation_resource # ----------------------------------------------------------------------------- def customise_org_organisation_controller(**attr):", "address = form_vars.get(\"address\", None) if address: form_vars.name = address else:", "(\"admin\", Storage( name_nice = T(\"Admin\"), #description = \"Site Administration\", restricted", "url_exists.channel_id) return else: # Update the URL name_exists.update_record(url=rss_url) if no_import:", "T(\"Support\"), # #description = \"Support Requests\", # restricted = True,", "== \"organisation\": # Add Network Status to List Fields list_fields", "= OrderedDict([ (\"en\", \"English\"), (\"es\", \"Español\"), ]) # Authentication settings", "table.enabled, limitby = (0, 1) ).first() if old and old.enabled:", "= False # Change the label of \"Teams\" to \"Groups\"", "to verify their email address? settings.auth.registration_requires_verification = True # Do", "\"Used within Inventory Management, Request Management and Asset Management\", restricted", "Newsfeed settings.cms.show_links = True # Uncomment to show Tags in", "without the file # image_field = s3db.pr_image.image # image_field.requires =", "to register themselves? 
settings.security.self_registration = \"index\" # Do new users", "= None from s3 import S3SQLCustomForm, S3SQLInlineComponent s3_sql_custom_fields = [\"first_name\",", "= True, module_type = 4 )), # All modules below", "ctable = s3db.pr_contact query = (ctable.pe_id == r.record.pe_id) & \\", "# ----------------------------------------------------------------------------- def customise_org_organisation_controller(**attr): s3db = current.s3db s3 = current.response.s3", "not r.component and (r.interactive or r.representation == \"aadata\"): from s3", "marker = \"%s_red\" % marker elif reqs == 2: #", "clicking on locations instead of opening the profile page settings.cms.location_click_filters", "customise_pr_group_resource # ----------------------------------------------------------------------------- def pr_contact_postprocess(form): \"\"\" Import Organisation/Network RSS Feeds", ")), (\"admin\", Storage( name_nice = T(\"Admin\"), #description = \"Site Administration\",", "location_id=location_id, person_id=row.requester_id, ) record = dict(id=_id) s3db.update_super(ptable, record) # Add", "a user registers settings.auth.registration_requests_mobile_phone = True # Uncomment this to", "Organisations in HR module #settings.hrm.organisation_label = \"National Society / Branch\"", "Field to show the chairperson of a group \"\"\" if", "multiple = False, ), \"job_title_id\", \"start_date\", ) list_fields = [\"id\",", "(\"survey\", Storage( name_nice = T(\"Surveys\"), #description = \"Create, enter, and", "Create form: Default rss_import = None crud_form = S3SQLCustomForm( \"name\",", "s3db.add_components(\"pr_group\", org_group_team = \"group_id\", ) s3db.configure(\"pr_group\", # Redirect to member", "db(table.url == rss_url).select(table.id, table.channel_id, table.enabled, limitby = (0, 1) ).first()", "== \"New York\") & \\ (gtable.level == \"L2\") manhattan =", "), S3SQLInlineComponent( \"document\", name = \"media\", label = T(\"URLs (media,", "standard_prep(r) if not result: return False from s3 import S3Represent,", "gluon.contrib.simplejson.ordered_dict import OrderedDict from gluon import current from gluon.html import", "Default rss_import = None crud_form = S3SQLCustomForm( \"name\", \"location_id\", \"mission\",", "4 )), # All modules below here should be possible", "= current.s3db form_vars = form.vars rss_url = form_vars.rsscontact_i_value_edit_0 or \\", "settings.cms.organisation_group = \"post_organisation_group.group_id\" # Uncomment to use person_id instead of", "= 10, # )), # @ToDo: Rewrite in a modern", "that we don't wish to import rss_import = \"on\" else:", "= \"email\", label = EMAIL, multiple = False, fields =", "separately for the menu # )), (\"gis\", Storage( name_nice =", "db.gis_location query = (gtable.name == \"New York\") & \\ (gtable.level", "= s3db.pr_contact query = (ctable.pe_id == r.record.pe_id) & \\ (ctable.contact_method", "if r.method != \"read\": from gluon.validators import IS_EMPTY_OR from s3", "in Newsfeed settings.cms.show_tags = True # Uncomment to show post", "for 'Mobile Phone' settings.ui.label_mobile_phone = \"Cell Phone\" # Enable this", "no_import = current.request.post_vars.get(\"rss_no_import\", None) if name_exists: if name_exists.url == rss_url:", "%(date)s\" % dict(priority=priority, date=date) else: title = priority body =", "customise_org_facility_controller(**attr): s3db = current.s3db s3 = current.response.s3 # Tell the", "options = \"SMS\")), ) s3_sql_custom_fields.insert(3, S3SQLInlineComponent( \"contact\", name = \"email\",", 
"Resources Management\", restricted = True, module_type = 3, )), #(\"vol\",", "True # Increase size of widget from s3 import s3_comments_widget", "the label for 'Mobile Phone' settings.ui.label_mobile_phone = \"Cell Phone\" #", "AC field.widget = None script = \\ '''$.filterOptionsS3({ 'trigger':'organisation_id', 'target':'site_id',", "disable the use of HR Certificates settings.hrm.use_certificates = False #", "\"aadata\": if not r.component: hr_fields = [\"organisation_id\", \"job_title_id\", \"site_id\", ]", "Code\" # Uncomment to disable responsive behavior of datatables #", "Filter form in Newsfeed be open by default settings.cms.filter_open =", "= True, polygons = True, ) # Default location to", "to this module access = None, # All Users (inc", "= s3db.pr_group field = table.group_type field.default = 3 # Relief", "Uncomment to enable the use of HR Education settings.hrm.use_education =", "in (\"create\", \"update\"): script = \\ '''$('#req_req_site_id').change(function(){ var url=$('#person_add').attr('href') url=url.split('?')", "field.default = manhattan.id table.mission.readable = table.mission.writable = True table.meetings.readable =", "old one name_exists.update_record(name=\"%s (Old)\" % name) if name_exists.enabled: # Disable", "to change the label for 'Postcode' settings.ui.label_postcode = \"ZIP Code\"", "be drawn up\", # restricted = True, # module_type =", "r.component: table = s3db.org_group list_fields = [\"name\", \"mission\", \"website\", \"meetings\",", "s3_fullname T = current.T settings = current.deployment_settings \"\"\" Template settings", "[(\"\", \"value\"), #(T(\"Don't Import Feed\"), \"poll\"), ], filterby = dict(field", "= T(\"Network\"), fields = [(\"\", \"org_group_id\")], # @ToDo: Make this" ]
[ "# -*- coding: utf-8 -*- from main import main main(\"issue561-v1\",", "/usr/bin/env python # -*- coding: utf-8 -*- from main import", "python # -*- coding: utf-8 -*- from main import main", "<reponame>nitinkaveriappa/downward #! /usr/bin/env python # -*- coding: utf-8 -*- from", "#! /usr/bin/env python # -*- coding: utf-8 -*- from main", "-*- coding: utf-8 -*- from main import main main(\"issue561-v1\", \"issue561-v2\")" ]
[ "= os.listdir(result.get_path()) self.assertTrue(('summary_csi_fingerid.csv' in contents)) if __name__ == '__main__': main()", "TestCase, main import qiime2 import os from q2_qemistree import MGFDirFmt,", "main import qiime2 import os from q2_qemistree import MGFDirFmt, SiriusDirFmt,", "in the file LICENSE, distributed with this software. # ----------------------------------------------------------------------------", "# exceptions are raised with self.assertRaises(OSError): res = artifactory(self.badsirpath, ['--help'],", "Distributed under the terms of the Modified BSD License. #", "with self.assertRaises(ValueError): compute_fragmentation_trees(sirius_path=self.goodsirpath, features=ions, ppm_max=15, profile='orbitrap', ionization_mode='n3gativ3') def test_reranking(self): ions", "# # The full license is in the file LICENSE,", "self.badsirpath = os.path.join(THIS_DIR, 'data/foo/bin') self.goodsirpath = os.path.join(THIS_DIR, 'data/' 'sirius-linux64-headless-4.0.1/bin') #", "from q2_qemistree import (compute_fragmentation_trees, rerank_molecular_formulas, predict_fingerprints) from q2_qemistree._fingerprint import artifactory", "= qiime2.Artifact.load(os.path.join(THIS_DIR, 'data/sirius.mgf.qza')) # SiriusFolder self.sirout = qiime2.Artifact.load(os.path.join(THIS_DIR, 'data/sirFolder.qza')) #", "def test_reranking(self): ions = self.ions.view(MGFDirFmt) sirout = self.sirout.view(SiriusDirFmt) result =", "self.ions.view(MGFDirFmt) sirout = self.sirout.view(SiriusDirFmt) result = rerank_molecular_formulas(sirius_path=self.goodsirpath, fragmentation_trees=sirout, features=ions) contents", "MassSpectrometryFeatures self.ions = qiime2.Artifact.load(os.path.join(THIS_DIR, 'data/sirius.mgf.qza')) # SiriusFolder self.sirout = qiime2.Artifact.load(os.path.join(THIS_DIR,", "result = compute_fragmentation_trees(sirius_path=self.goodsirpath, features=ions, ppm_max=15, profile='orbitrap', ionization_mode='negative') contents = os.listdir(result.get_path())", "result = compute_fragmentation_trees(sirius_path=self.goodsirpath, features=ions, ppm_max=15, profile='orbitrap') contents = os.listdir(result.get_path()) self.assertTrue(('version.txt'", "self.ions = qiime2.Artifact.load(os.path.join(THIS_DIR, 'data/sirius.mgf.qza')) # SiriusFolder self.sirout = qiime2.Artifact.load(os.path.join(THIS_DIR, 'data/sirFolder.qza'))", "ppm_max=15, profile='orbitrap') contents = os.listdir(result.get_path()) self.assertTrue(('version.txt' in contents)) def test_fragmentation_trees_negative_ionization(self):", "self.assertTrue(('zodiac_summary.csv' in contents)) def test_fingerid(self): zodout = self.zodout.view(ZodiacDirFmt) result =", "result = rerank_molecular_formulas(sirius_path=self.goodsirpath, fragmentation_trees=sirout, features=ions) contents = os.listdir(result.get_path()) self.assertTrue(('zodiac_summary.csv' in", "= self.ions.view(MGFDirFmt) result = compute_fragmentation_trees(sirius_path=self.goodsirpath, features=ions, ppm_max=15, profile='orbitrap', ionization_mode='negative') contents", "with this software. 
# ---------------------------------------------------------------------------- from unittest import TestCase, main", "MGFDirFmt, SiriusDirFmt, ZodiacDirFmt, OutputDirs from q2_qemistree import (compute_fragmentation_trees, rerank_molecular_formulas, predict_fingerprints)", "= self.ions.view(MGFDirFmt) result = compute_fragmentation_trees(sirius_path=self.goodsirpath, features=ions, ppm_max=15, profile='orbitrap') contents =", "(compute_fragmentation_trees, rerank_molecular_formulas, predict_fingerprints) from q2_qemistree._fingerprint import artifactory class FingerprintTests(TestCase): def", "features=ions) contents = os.listdir(result.get_path()) self.assertTrue(('zodiac_summary.csv' in contents)) def test_fingerid(self): zodout", "ppm_max=15, profile='orbitrap', ionization_mode='negative') contents = os.listdir(result.get_path()) self.assertTrue(('version.txt' in contents)) def", "is working fine obs = os.environ.get('_JAVA_OPTIONS', '') res = artifactory(self.goodsirpath,", "compute_fragmentation_trees(sirius_path=self.goodsirpath, features=ions, ppm_max=15, profile='orbitrap') contents = os.listdir(result.get_path()) self.assertTrue(('version.txt' in contents))", "distributed with this software. # ---------------------------------------------------------------------------- from unittest import TestCase,", "rerank_molecular_formulas, predict_fingerprints) from q2_qemistree._fingerprint import artifactory class FingerprintTests(TestCase): def setUp(self):", "os.path.join(THIS_DIR, 'data/foo/bin') self.goodsirpath = os.path.join(THIS_DIR, 'data/' 'sirius-linux64-headless-4.0.1/bin') # MassSpectrometryFeatures self.ions", "'') res = artifactory(self.goodsirpath, ['--help'], constructor=OutputDirs, java_flags='-Xms2G') self.assertEqual(obs, os.environ.get('_JAVA_OPTIONS')) self.assertTrue(isinstance(res,", "SiriusFolder self.sirout = qiime2.Artifact.load(os.path.join(THIS_DIR, 'data/sirFolder.qza')) # ZodiacFolder self.zodout = qiime2.Artifact.load(os.path.join(THIS_DIR,", "self.zodout.view(ZodiacDirFmt) result = predict_fingerprints(sirius_path=self.goodsirpath, molecular_formulas=zodout, ppm_max=15) contents = os.listdir(result.get_path()) self.assertTrue(('summary_csi_fingerid.csv'", "os.listdir(result.get_path()) self.assertTrue(('version.txt' in contents)) def test_fragmentation_trees_exception(self): ions = self.ions.view(MGFDirFmt) with", "constructor=OutputDirs, java_flags='-Xms2G') self.assertEqual(obs, os.environ.get('_JAVA_OPTIONS')) self.assertTrue(isinstance(res, OutputDirs)) # exceptions are raised", "compute_fragmentation_trees(sirius_path=self.goodsirpath, features=ions, ppm_max=15, profile='orbitrap', ionization_mode='n3gativ3') def test_reranking(self): ions = self.ions.view(MGFDirFmt)", "self.sirout.view(SiriusDirFmt) result = rerank_molecular_formulas(sirius_path=self.goodsirpath, fragmentation_trees=sirout, features=ions) contents = os.listdir(result.get_path()) self.assertTrue(('zodiac_summary.csv'", "ZodiacFolder self.zodout = qiime2.Artifact.load(os.path.join(THIS_DIR, 'data/zodFolder.qza')) def test_artifactory(self): # everything is", "self.assertTrue(('version.txt' in contents)) def test_fragmentation_trees_negative_ionization(self): ions = self.ions.view(MGFDirFmt) result =", "os.environ.get('_JAVA_OPTIONS', '') res = artifactory(self.goodsirpath, ['--help'], constructor=OutputDirs, java_flags='-Xms2G') self.assertEqual(obs, os.environ.get('_JAVA_OPTIONS'))", "terms of the Modified BSD License. 
# # The full", "from q2_qemistree import MGFDirFmt, SiriusDirFmt, ZodiacDirFmt, OutputDirs from q2_qemistree import", "os.listdir(result.get_path()) self.assertTrue(('version.txt' in contents)) def test_fragmentation_trees_negative_ionization(self): ions = self.ions.view(MGFDirFmt) result", "---------------------------------------------------------------------------- from unittest import TestCase, main import qiime2 import os", "= os.path.dirname(os.path.abspath(__file__)) self.badsirpath = os.path.join(THIS_DIR, 'data/foo/bin') self.goodsirpath = os.path.join(THIS_DIR, 'data/'", "the Modified BSD License. # # The full license is", "# Distributed under the terms of the Modified BSD License.", "test_reranking(self): ions = self.ions.view(MGFDirFmt) sirout = self.sirout.view(SiriusDirFmt) result = rerank_molecular_formulas(sirius_path=self.goodsirpath,", "# ZodiacFolder self.zodout = qiime2.Artifact.load(os.path.join(THIS_DIR, 'data/zodFolder.qza')) def test_artifactory(self): # everything", "ions = self.ions.view(MGFDirFmt) with self.assertRaises(ValueError): compute_fragmentation_trees(sirius_path=self.goodsirpath, features=ions, ppm_max=15, profile='orbitrap', ionization_mode='n3gativ3')", "in contents)) def test_fingerid(self): zodout = self.zodout.view(ZodiacDirFmt) result = predict_fingerprints(sirius_path=self.goodsirpath,", "OutputDirs from q2_qemistree import (compute_fragmentation_trees, rerank_molecular_formulas, predict_fingerprints) from q2_qemistree._fingerprint import", "ions = self.ions.view(MGFDirFmt) result = compute_fragmentation_trees(sirius_path=self.goodsirpath, features=ions, ppm_max=15, profile='orbitrap', ionization_mode='negative')", "are raised with self.assertRaises(OSError): res = artifactory(self.badsirpath, ['--help'], constructor=OutputDirs) def", "= artifactory(self.badsirpath, ['--help'], constructor=OutputDirs) def test_fragmentation_trees(self): ions = self.ions.view(MGFDirFmt) result", "features=ions, ppm_max=15, profile='orbitrap') contents = os.listdir(result.get_path()) self.assertTrue(('version.txt' in contents)) def", "ppm_max=15, profile='orbitrap', ionization_mode='n3gativ3') def test_reranking(self): ions = self.ions.view(MGFDirFmt) sirout =", "constructor=OutputDirs) def test_fragmentation_trees(self): ions = self.ions.view(MGFDirFmt) result = compute_fragmentation_trees(sirius_path=self.goodsirpath, features=ions,", "# The full license is in the file LICENSE, distributed", "q2_qemistree._fingerprint import artifactory class FingerprintTests(TestCase): def setUp(self): THIS_DIR = os.path.dirname(os.path.abspath(__file__))", "obs = os.environ.get('_JAVA_OPTIONS', '') res = artifactory(self.goodsirpath, ['--help'], constructor=OutputDirs, java_flags='-Xms2G')", "everything is working fine obs = os.environ.get('_JAVA_OPTIONS', '') res =", "rerank_molecular_formulas(sirius_path=self.goodsirpath, fragmentation_trees=sirout, features=ions) contents = os.listdir(result.get_path()) self.assertTrue(('zodiac_summary.csv' in contents)) def", "['--help'], constructor=OutputDirs, java_flags='-Xms2G') self.assertEqual(obs, os.environ.get('_JAVA_OPTIONS')) self.assertTrue(isinstance(res, OutputDirs)) # exceptions are", "def test_artifactory(self): # everything is working fine obs = os.environ.get('_JAVA_OPTIONS',", "test_artifactory(self): # everything is working fine obs = os.environ.get('_JAVA_OPTIONS', '')", "compute_fragmentation_trees(sirius_path=self.goodsirpath, features=ions, ppm_max=15, profile='orbitrap', ionization_mode='negative') contents = 
os.listdir(result.get_path()) self.assertTrue(('version.txt' in", "# ---------------------------------------------------------------------------- from unittest import TestCase, main import qiime2 import", "= qiime2.Artifact.load(os.path.join(THIS_DIR, 'data/sirFolder.qza')) # ZodiacFolder self.zodout = qiime2.Artifact.load(os.path.join(THIS_DIR, 'data/zodFolder.qza')) def", "self.sirout = qiime2.Artifact.load(os.path.join(THIS_DIR, 'data/sirFolder.qza')) # ZodiacFolder self.zodout = qiime2.Artifact.load(os.path.join(THIS_DIR, 'data/zodFolder.qza'))", "test_fragmentation_trees(self): ions = self.ions.view(MGFDirFmt) result = compute_fragmentation_trees(sirius_path=self.goodsirpath, features=ions, ppm_max=15, profile='orbitrap')", "q2_qemistree import (compute_fragmentation_trees, rerank_molecular_formulas, predict_fingerprints) from q2_qemistree._fingerprint import artifactory class", "this software. # ---------------------------------------------------------------------------- from unittest import TestCase, main import", "fragmentation_trees=sirout, features=ions) contents = os.listdir(result.get_path()) self.assertTrue(('zodiac_summary.csv' in contents)) def test_fingerid(self):", "= self.zodout.view(ZodiacDirFmt) result = predict_fingerprints(sirius_path=self.goodsirpath, molecular_formulas=zodout, ppm_max=15) contents = os.listdir(result.get_path())", "Copyright (c) 2016-2018, QIIME 2 development team. # # Distributed", "artifactory(self.goodsirpath, ['--help'], constructor=OutputDirs, java_flags='-Xms2G') self.assertEqual(obs, os.environ.get('_JAVA_OPTIONS')) self.assertTrue(isinstance(res, OutputDirs)) # exceptions", "software. # ---------------------------------------------------------------------------- from unittest import TestCase, main import qiime2", "= os.listdir(result.get_path()) self.assertTrue(('version.txt' in contents)) def test_fragmentation_trees_negative_ionization(self): ions = self.ions.view(MGFDirFmt)", "license is in the file LICENSE, distributed with this software.", "self.zodout = qiime2.Artifact.load(os.path.join(THIS_DIR, 'data/zodFolder.qza')) def test_artifactory(self): # everything is working", "file LICENSE, distributed with this software. # ---------------------------------------------------------------------------- from unittest", "= artifactory(self.goodsirpath, ['--help'], constructor=OutputDirs, java_flags='-Xms2G') self.assertEqual(obs, os.environ.get('_JAVA_OPTIONS')) self.assertTrue(isinstance(res, OutputDirs)) #", "contents)) def test_fingerid(self): zodout = self.zodout.view(ZodiacDirFmt) result = predict_fingerprints(sirius_path=self.goodsirpath, molecular_formulas=zodout,", "qiime2.Artifact.load(os.path.join(THIS_DIR, 'data/sirius.mgf.qza')) # SiriusFolder self.sirout = qiime2.Artifact.load(os.path.join(THIS_DIR, 'data/sirFolder.qza')) # ZodiacFolder", "THIS_DIR = os.path.dirname(os.path.abspath(__file__)) self.badsirpath = os.path.join(THIS_DIR, 'data/foo/bin') self.goodsirpath = os.path.join(THIS_DIR,", "= os.environ.get('_JAVA_OPTIONS', '') res = artifactory(self.goodsirpath, ['--help'], constructor=OutputDirs, java_flags='-Xms2G') self.assertEqual(obs,", "self.assertEqual(obs, os.environ.get('_JAVA_OPTIONS')) self.assertTrue(isinstance(res, OutputDirs)) # exceptions are raised with self.assertRaises(OSError):", "def test_fragmentation_trees(self): ions = self.ions.view(MGFDirFmt) result = compute_fragmentation_trees(sirius_path=self.goodsirpath, features=ions, ppm_max=15,", "under the terms of the Modified BSD License. 
# #", "res = artifactory(self.goodsirpath, ['--help'], constructor=OutputDirs, java_flags='-Xms2G') self.assertEqual(obs, os.environ.get('_JAVA_OPTIONS')) self.assertTrue(isinstance(res, OutputDirs))", "artifactory class FingerprintTests(TestCase): def setUp(self): THIS_DIR = os.path.dirname(os.path.abspath(__file__)) self.badsirpath =", "profile='orbitrap', ionization_mode='n3gativ3') def test_reranking(self): ions = self.ions.view(MGFDirFmt) sirout = self.sirout.view(SiriusDirFmt)", "import MGFDirFmt, SiriusDirFmt, ZodiacDirFmt, OutputDirs from q2_qemistree import (compute_fragmentation_trees, rerank_molecular_formulas,", "= compute_fragmentation_trees(sirius_path=self.goodsirpath, features=ions, ppm_max=15, profile='orbitrap') contents = os.listdir(result.get_path()) self.assertTrue(('version.txt' in", "with self.assertRaises(OSError): res = artifactory(self.badsirpath, ['--help'], constructor=OutputDirs) def test_fragmentation_trees(self): ions", "team. # # Distributed under the terms of the Modified", "contents)) def test_fragmentation_trees_exception(self): ions = self.ions.view(MGFDirFmt) with self.assertRaises(ValueError): compute_fragmentation_trees(sirius_path=self.goodsirpath, features=ions,", "from unittest import TestCase, main import qiime2 import os from", "test_fragmentation_trees_negative_ionization(self): ions = self.ions.view(MGFDirFmt) result = compute_fragmentation_trees(sirius_path=self.goodsirpath, features=ions, ppm_max=15, profile='orbitrap',", "of the Modified BSD License. # # The full license", "molecular_formulas=zodout, ppm_max=15) contents = os.listdir(result.get_path()) self.assertTrue(('summary_csi_fingerid.csv' in contents)) if __name__", "= self.ions.view(MGFDirFmt) sirout = self.sirout.view(SiriusDirFmt) result = rerank_molecular_formulas(sirius_path=self.goodsirpath, fragmentation_trees=sirout, features=ions)", "profile='orbitrap') contents = os.listdir(result.get_path()) self.assertTrue(('version.txt' in contents)) def test_fragmentation_trees_negative_ionization(self): ions", "import artifactory class FingerprintTests(TestCase): def setUp(self): THIS_DIR = os.path.dirname(os.path.abspath(__file__)) self.badsirpath", "working fine obs = os.environ.get('_JAVA_OPTIONS', '') res = artifactory(self.goodsirpath, ['--help'],", "2 development team. # # Distributed under the terms of", "= rerank_molecular_formulas(sirius_path=self.goodsirpath, fragmentation_trees=sirout, features=ions) contents = os.listdir(result.get_path()) self.assertTrue(('zodiac_summary.csv' in contents))", "development team. 
# # Distributed under the terms of the", "ppm_max=15) contents = os.listdir(result.get_path()) self.assertTrue(('summary_csi_fingerid.csv' in contents)) if __name__ ==", "fine obs = os.environ.get('_JAVA_OPTIONS', '') res = artifactory(self.goodsirpath, ['--help'], constructor=OutputDirs,", "features=ions, ppm_max=15, profile='orbitrap', ionization_mode='negative') contents = os.listdir(result.get_path()) self.assertTrue(('version.txt' in contents))", "self.assertTrue(isinstance(res, OutputDirs)) # exceptions are raised with self.assertRaises(OSError): res =", "self.goodsirpath = os.path.join(THIS_DIR, 'data/' 'sirius-linux64-headless-4.0.1/bin') # MassSpectrometryFeatures self.ions = qiime2.Artifact.load(os.path.join(THIS_DIR,", "'data/' 'sirius-linux64-headless-4.0.1/bin') # MassSpectrometryFeatures self.ions = qiime2.Artifact.load(os.path.join(THIS_DIR, 'data/sirius.mgf.qza')) # SiriusFolder", "class FingerprintTests(TestCase): def setUp(self): THIS_DIR = os.path.dirname(os.path.abspath(__file__)) self.badsirpath = os.path.join(THIS_DIR,", "self.ions.view(MGFDirFmt) result = compute_fragmentation_trees(sirius_path=self.goodsirpath, features=ions, ppm_max=15, profile='orbitrap', ionization_mode='negative') contents =", "self.assertRaises(OSError): res = artifactory(self.badsirpath, ['--help'], constructor=OutputDirs) def test_fragmentation_trees(self): ions =", "qiime2.Artifact.load(os.path.join(THIS_DIR, 'data/zodFolder.qza')) def test_artifactory(self): # everything is working fine obs", "import qiime2 import os from q2_qemistree import MGFDirFmt, SiriusDirFmt, ZodiacDirFmt,", "profile='orbitrap', ionization_mode='negative') contents = os.listdir(result.get_path()) self.assertTrue(('version.txt' in contents)) def test_fragmentation_trees_exception(self):", "predict_fingerprints(sirius_path=self.goodsirpath, molecular_formulas=zodout, ppm_max=15) contents = os.listdir(result.get_path()) self.assertTrue(('summary_csi_fingerid.csv' in contents)) if", "# everything is working fine obs = os.environ.get('_JAVA_OPTIONS', '') res", "= compute_fragmentation_trees(sirius_path=self.goodsirpath, features=ions, ppm_max=15, profile='orbitrap', ionization_mode='negative') contents = os.listdir(result.get_path()) self.assertTrue(('version.txt'", "self.assertRaises(ValueError): compute_fragmentation_trees(sirius_path=self.goodsirpath, features=ions, ppm_max=15, profile='orbitrap', ionization_mode='n3gativ3') def test_reranking(self): ions =", "ZodiacDirFmt, OutputDirs from q2_qemistree import (compute_fragmentation_trees, rerank_molecular_formulas, predict_fingerprints) from q2_qemistree._fingerprint", "contents = os.listdir(result.get_path()) self.assertTrue(('version.txt' in contents)) def test_fragmentation_trees_exception(self): ions =", "in contents)) def test_fragmentation_trees_exception(self): ions = self.ions.view(MGFDirFmt) with self.assertRaises(ValueError): compute_fragmentation_trees(sirius_path=self.goodsirpath,", "ions = self.ions.view(MGFDirFmt) sirout = self.sirout.view(SiriusDirFmt) result = rerank_molecular_formulas(sirius_path=self.goodsirpath, fragmentation_trees=sirout,", "ions = self.ions.view(MGFDirFmt) result = compute_fragmentation_trees(sirius_path=self.goodsirpath, features=ions, ppm_max=15, profile='orbitrap') contents", "BSD License. # # The full license is in the", "LICENSE, distributed with this software. 
# ---------------------------------------------------------------------------- from unittest import", "import os from q2_qemistree import MGFDirFmt, SiriusDirFmt, ZodiacDirFmt, OutputDirs from", "setUp(self): THIS_DIR = os.path.dirname(os.path.abspath(__file__)) self.badsirpath = os.path.join(THIS_DIR, 'data/foo/bin') self.goodsirpath =", "features=ions, ppm_max=15, profile='orbitrap', ionization_mode='n3gativ3') def test_reranking(self): ions = self.ions.view(MGFDirFmt) sirout", "q2_qemistree import MGFDirFmt, SiriusDirFmt, ZodiacDirFmt, OutputDirs from q2_qemistree import (compute_fragmentation_trees,", "contents = os.listdir(result.get_path()) self.assertTrue(('summary_csi_fingerid.csv' in contents)) if __name__ == '__main__':", "predict_fingerprints) from q2_qemistree._fingerprint import artifactory class FingerprintTests(TestCase): def setUp(self): THIS_DIR", "full license is in the file LICENSE, distributed with this", "ionization_mode='n3gativ3') def test_reranking(self): ions = self.ions.view(MGFDirFmt) sirout = self.sirout.view(SiriusDirFmt) result", "def test_fingerid(self): zodout = self.zodout.view(ZodiacDirFmt) result = predict_fingerprints(sirius_path=self.goodsirpath, molecular_formulas=zodout, ppm_max=15)", "# MassSpectrometryFeatures self.ions = qiime2.Artifact.load(os.path.join(THIS_DIR, 'data/sirius.mgf.qza')) # SiriusFolder self.sirout =", "raised with self.assertRaises(OSError): res = artifactory(self.badsirpath, ['--help'], constructor=OutputDirs) def test_fragmentation_trees(self):", "result = predict_fingerprints(sirius_path=self.goodsirpath, molecular_formulas=zodout, ppm_max=15) contents = os.listdir(result.get_path()) self.assertTrue(('summary_csi_fingerid.csv' in", "test_fragmentation_trees_exception(self): ions = self.ions.view(MGFDirFmt) with self.assertRaises(ValueError): compute_fragmentation_trees(sirius_path=self.goodsirpath, features=ions, ppm_max=15, profile='orbitrap',", "unittest import TestCase, main import qiime2 import os from q2_qemistree", "java_flags='-Xms2G') self.assertEqual(obs, os.environ.get('_JAVA_OPTIONS')) self.assertTrue(isinstance(res, OutputDirs)) # exceptions are raised with", "self.assertTrue(('version.txt' in contents)) def test_fragmentation_trees_exception(self): ions = self.ions.view(MGFDirFmt) with self.assertRaises(ValueError):", "os from q2_qemistree import MGFDirFmt, SiriusDirFmt, ZodiacDirFmt, OutputDirs from q2_qemistree", "os.path.join(THIS_DIR, 'data/' 'sirius-linux64-headless-4.0.1/bin') # MassSpectrometryFeatures self.ions = qiime2.Artifact.load(os.path.join(THIS_DIR, 'data/sirius.mgf.qza')) #", "= qiime2.Artifact.load(os.path.join(THIS_DIR, 'data/zodFolder.qza')) def test_artifactory(self): # everything is working fine", "self.ions.view(MGFDirFmt) result = compute_fragmentation_trees(sirius_path=self.goodsirpath, features=ions, ppm_max=15, profile='orbitrap') contents = os.listdir(result.get_path())", "artifactory(self.badsirpath, ['--help'], constructor=OutputDirs) def test_fragmentation_trees(self): ions = self.ions.view(MGFDirFmt) result =", "= self.sirout.view(SiriusDirFmt) result = rerank_molecular_formulas(sirius_path=self.goodsirpath, fragmentation_trees=sirout, features=ions) contents = os.listdir(result.get_path())", "def test_fragmentation_trees_exception(self): ions = self.ions.view(MGFDirFmt) with self.assertRaises(ValueError): compute_fragmentation_trees(sirius_path=self.goodsirpath, features=ions, ppm_max=15,", "contents = os.listdir(result.get_path()) self.assertTrue(('zodiac_summary.csv' in contents)) 
def test_fingerid(self): zodout =", "= os.listdir(result.get_path()) self.assertTrue(('zodiac_summary.csv' in contents)) def test_fingerid(self): zodout = self.zodout.view(ZodiacDirFmt)", "the terms of the Modified BSD License. # # The", "def test_fragmentation_trees_negative_ionization(self): ions = self.ions.view(MGFDirFmt) result = compute_fragmentation_trees(sirius_path=self.goodsirpath, features=ions, ppm_max=15,", "# SiriusFolder self.sirout = qiime2.Artifact.load(os.path.join(THIS_DIR, 'data/sirFolder.qza')) # ZodiacFolder self.zodout =", "= os.listdir(result.get_path()) self.assertTrue(('version.txt' in contents)) def test_fragmentation_trees_exception(self): ions = self.ions.view(MGFDirFmt)", "sirout = self.sirout.view(SiriusDirFmt) result = rerank_molecular_formulas(sirius_path=self.goodsirpath, fragmentation_trees=sirout, features=ions) contents =", "= predict_fingerprints(sirius_path=self.goodsirpath, molecular_formulas=zodout, ppm_max=15) contents = os.listdir(result.get_path()) self.assertTrue(('summary_csi_fingerid.csv' in contents))", "SiriusDirFmt, ZodiacDirFmt, OutputDirs from q2_qemistree import (compute_fragmentation_trees, rerank_molecular_formulas, predict_fingerprints) from", "'data/zodFolder.qza')) def test_artifactory(self): # everything is working fine obs =", "zodout = self.zodout.view(ZodiacDirFmt) result = predict_fingerprints(sirius_path=self.goodsirpath, molecular_formulas=zodout, ppm_max=15) contents =", "---------------------------------------------------------------------------- # Copyright (c) 2016-2018, QIIME 2 development team. #", "def setUp(self): THIS_DIR = os.path.dirname(os.path.abspath(__file__)) self.badsirpath = os.path.join(THIS_DIR, 'data/foo/bin') self.goodsirpath", "Modified BSD License. # # The full license is in", "The full license is in the file LICENSE, distributed with", "FingerprintTests(TestCase): def setUp(self): THIS_DIR = os.path.dirname(os.path.abspath(__file__)) self.badsirpath = os.path.join(THIS_DIR, 'data/foo/bin')", "= os.path.join(THIS_DIR, 'data/foo/bin') self.goodsirpath = os.path.join(THIS_DIR, 'data/' 'sirius-linux64-headless-4.0.1/bin') # MassSpectrometryFeatures", "= self.ions.view(MGFDirFmt) with self.assertRaises(ValueError): compute_fragmentation_trees(sirius_path=self.goodsirpath, features=ions, ppm_max=15, profile='orbitrap', ionization_mode='n3gativ3') def", "os.listdir(result.get_path()) self.assertTrue(('zodiac_summary.csv' in contents)) def test_fingerid(self): zodout = self.zodout.view(ZodiacDirFmt) result", "'data/sirius.mgf.qza')) # SiriusFolder self.sirout = qiime2.Artifact.load(os.path.join(THIS_DIR, 'data/sirFolder.qza')) # ZodiacFolder self.zodout", "License. 
# # The full license is in the file", "qiime2.Artifact.load(os.path.join(THIS_DIR, 'data/sirFolder.qza')) # ZodiacFolder self.zodout = qiime2.Artifact.load(os.path.join(THIS_DIR, 'data/zodFolder.qza')) def test_artifactory(self):", "test_fingerid(self): zodout = self.zodout.view(ZodiacDirFmt) result = predict_fingerprints(sirius_path=self.goodsirpath, molecular_formulas=zodout, ppm_max=15) contents", "in contents)) def test_fragmentation_trees_negative_ionization(self): ions = self.ions.view(MGFDirFmt) result = compute_fragmentation_trees(sirius_path=self.goodsirpath,", "os.environ.get('_JAVA_OPTIONS')) self.assertTrue(isinstance(res, OutputDirs)) # exceptions are raised with self.assertRaises(OSError): res", "qiime2 import os from q2_qemistree import MGFDirFmt, SiriusDirFmt, ZodiacDirFmt, OutputDirs", "'sirius-linux64-headless-4.0.1/bin') # MassSpectrometryFeatures self.ions = qiime2.Artifact.load(os.path.join(THIS_DIR, 'data/sirius.mgf.qza')) # SiriusFolder self.sirout", "res = artifactory(self.badsirpath, ['--help'], constructor=OutputDirs) def test_fragmentation_trees(self): ions = self.ions.view(MGFDirFmt)", "(c) 2016-2018, QIIME 2 development team. # # Distributed under", "2016-2018, QIIME 2 development team. # # Distributed under the", "self.ions.view(MGFDirFmt) with self.assertRaises(ValueError): compute_fragmentation_trees(sirius_path=self.goodsirpath, features=ions, ppm_max=15, profile='orbitrap', ionization_mode='n3gativ3') def test_reranking(self):", "from q2_qemistree._fingerprint import artifactory class FingerprintTests(TestCase): def setUp(self): THIS_DIR =", "OutputDirs)) # exceptions are raised with self.assertRaises(OSError): res = artifactory(self.badsirpath,", "contents)) def test_fragmentation_trees_negative_ionization(self): ions = self.ions.view(MGFDirFmt) result = compute_fragmentation_trees(sirius_path=self.goodsirpath, features=ions,", "= os.path.join(THIS_DIR, 'data/' 'sirius-linux64-headless-4.0.1/bin') # MassSpectrometryFeatures self.ions = qiime2.Artifact.load(os.path.join(THIS_DIR, 'data/sirius.mgf.qza'))", "the file LICENSE, distributed with this software. # ---------------------------------------------------------------------------- from", "'data/foo/bin') self.goodsirpath = os.path.join(THIS_DIR, 'data/' 'sirius-linux64-headless-4.0.1/bin') # MassSpectrometryFeatures self.ions =", "QIIME 2 development team. # # Distributed under the terms", "exceptions are raised with self.assertRaises(OSError): res = artifactory(self.badsirpath, ['--help'], constructor=OutputDirs)", "# # Distributed under the terms of the Modified BSD", "is in the file LICENSE, distributed with this software. 
#", "import (compute_fragmentation_trees, rerank_molecular_formulas, predict_fingerprints) from q2_qemistree._fingerprint import artifactory class FingerprintTests(TestCase):", "'data/sirFolder.qza')) # ZodiacFolder self.zodout = qiime2.Artifact.load(os.path.join(THIS_DIR, 'data/zodFolder.qza')) def test_artifactory(self): #", "['--help'], constructor=OutputDirs) def test_fragmentation_trees(self): ions = self.ions.view(MGFDirFmt) result = compute_fragmentation_trees(sirius_path=self.goodsirpath,", "ionization_mode='negative') contents = os.listdir(result.get_path()) self.assertTrue(('version.txt' in contents)) def test_fragmentation_trees_exception(self): ions", "contents = os.listdir(result.get_path()) self.assertTrue(('version.txt' in contents)) def test_fragmentation_trees_negative_ionization(self): ions =", "import TestCase, main import qiime2 import os from q2_qemistree import", "# Copyright (c) 2016-2018, QIIME 2 development team. # #", "# ---------------------------------------------------------------------------- # Copyright (c) 2016-2018, QIIME 2 development team.", "os.path.dirname(os.path.abspath(__file__)) self.badsirpath = os.path.join(THIS_DIR, 'data/foo/bin') self.goodsirpath = os.path.join(THIS_DIR, 'data/' 'sirius-linux64-headless-4.0.1/bin')" ]
[ "time import datetime import contextlib @contextlib.contextmanager def patch(obj, **attrs): \"Monkey", "patch an object's attributes, restoring them after the block.\" stored", "of a block, if over optional threshold.\" start = time.time()", "stored[name] = getattr(obj, name) setattr(obj, name, attrs[name]) try: yield finally:", "{} for name in attrs: stored[name] = getattr(obj, name) setattr(obj,", "def patch(obj, **attrs): \"Monkey patch an object's attributes, restoring them", "try: yield finally: for name in stored: setattr(obj, name, stored[name])", "them after the block.\" stored = {} for name in", "getattr(obj, name) setattr(obj, name, attrs[name]) try: yield finally: for name", "name, attrs[name]) try: yield finally: for name in stored: setattr(obj,", "finally: for name in stored: setattr(obj, name, stored[name]) @contextlib.contextmanager def", "name, stored[name]) @contextlib.contextmanager def timed(msg='', threshold=0): \"Print elapsed time of", "@contextlib.contextmanager def timed(msg='', threshold=0): \"Print elapsed time of a block,", "= time.time() - start if elapsed >= threshold: print datetime.timedelta(seconds=elapsed),", "elapsed time of a block, if over optional threshold.\" start", "over optional threshold.\" start = time.time() try: yield finally: elapsed", "block.\" stored = {} for name in attrs: stored[name] =", "object's attributes, restoring them after the block.\" stored = {}", "= getattr(obj, name) setattr(obj, name, attrs[name]) try: yield finally: for", "contextlib @contextlib.contextmanager def patch(obj, **attrs): \"Monkey patch an object's attributes,", "time.time() try: yield finally: elapsed = time.time() - start if", "yield finally: elapsed = time.time() - start if elapsed >=", "import datetime import contextlib @contextlib.contextmanager def patch(obj, **attrs): \"Monkey patch", "the block.\" stored = {} for name in attrs: stored[name]", "attributes, restoring them after the block.\" stored = {} for", "for name in attrs: stored[name] = getattr(obj, name) setattr(obj, name,", "@contextlib.contextmanager def patch(obj, **attrs): \"Monkey patch an object's attributes, restoring", "def timed(msg='', threshold=0): \"Print elapsed time of a block, if", "= {} for name in attrs: stored[name] = getattr(obj, name)", "timed(msg='', threshold=0): \"Print elapsed time of a block, if over", "stored[name]) @contextlib.contextmanager def timed(msg='', threshold=0): \"Print elapsed time of a", "import contextlib @contextlib.contextmanager def patch(obj, **attrs): \"Monkey patch an object's", "start = time.time() try: yield finally: elapsed = time.time() -", "import time import datetime import contextlib @contextlib.contextmanager def patch(obj, **attrs):", "threshold=0): \"Print elapsed time of a block, if over optional", "threshold.\" start = time.time() try: yield finally: elapsed = time.time()", "stored = {} for name in attrs: stored[name] = getattr(obj,", "patch(obj, **attrs): \"Monkey patch an object's attributes, restoring them after", "in stored: setattr(obj, name, stored[name]) @contextlib.contextmanager def timed(msg='', threshold=0): \"Print", "if over optional threshold.\" start = time.time() try: yield finally:", "\"Monkey patch an object's attributes, restoring them after the block.\"", "an object's attributes, restoring them after the block.\" stored =", "setattr(obj, name, stored[name]) @contextlib.contextmanager def timed(msg='', threshold=0): \"Print elapsed time", "= time.time() try: yield finally: elapsed = time.time() - start", "optional 
threshold.\" start = time.time() try: yield finally: elapsed =", "for name in stored: setattr(obj, name, stored[name]) @contextlib.contextmanager def timed(msg='',", "time.time() - start if elapsed >= threshold: print datetime.timedelta(seconds=elapsed), msg", "a block, if over optional threshold.\" start = time.time() try:", "\"Print elapsed time of a block, if over optional threshold.\"", "name in attrs: stored[name] = getattr(obj, name) setattr(obj, name, attrs[name])", "stored: setattr(obj, name, stored[name]) @contextlib.contextmanager def timed(msg='', threshold=0): \"Print elapsed", "after the block.\" stored = {} for name in attrs:", "**attrs): \"Monkey patch an object's attributes, restoring them after the", "finally: elapsed = time.time() - start if elapsed >= threshold:", "block, if over optional threshold.\" start = time.time() try: yield", "elapsed = time.time() - start if elapsed >= threshold: print", "yield finally: for name in stored: setattr(obj, name, stored[name]) @contextlib.contextmanager", "in attrs: stored[name] = getattr(obj, name) setattr(obj, name, attrs[name]) try:", "attrs[name]) try: yield finally: for name in stored: setattr(obj, name,", "attrs: stored[name] = getattr(obj, name) setattr(obj, name, attrs[name]) try: yield", "time of a block, if over optional threshold.\" start =", "restoring them after the block.\" stored = {} for name", "name) setattr(obj, name, attrs[name]) try: yield finally: for name in", "name in stored: setattr(obj, name, stored[name]) @contextlib.contextmanager def timed(msg='', threshold=0):", "try: yield finally: elapsed = time.time() - start if elapsed", "datetime import contextlib @contextlib.contextmanager def patch(obj, **attrs): \"Monkey patch an", "setattr(obj, name, attrs[name]) try: yield finally: for name in stored:" ]
[ "cfg.OptGroup(name='worker', title='Worker options') CFG.register_group(worker_group) CFG.register_opts(worker_opts, group=worker_group) def _perform_task(task): def _notify(event_type,", "import db from tempo import notifier from tempo import queue", "as a daemon'), cfg.StrOpt('publisher_id', default='host', help='Where the notification came from')", "the License. import logging import kombu from tempo import actions", "locals()) return _perform_task(task) def _consume_messages(exchange, queue, key): kombu_xchg = kombu.Exchange(exchange,", "2.0 (the \"License\"); # you may not use this file", "kombu_xchg = kombu.Exchange(exchange, 'direct', durable=True) kombu_queue = kombu.Queue(queue, exchange=kombu_xchg, key=key)", "= exception publisher_id = CFG.worker.publisher_id priority = notifier.DEBUG notifier.notify(publisher_id, event_type,", "notification came from') ] worker_group = cfg.OptGroup(name='worker', title='Worker options') CFG.register_group(worker_group)", "from') ] worker_group = cfg.OptGroup(name='worker', title='Worker options') CFG.register_group(worker_group) CFG.register_opts(worker_opts, group=worker_group)", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "% locals()) _notify('Errored Task', exception=e) else: logger.debug(\"task '%(task_uuid)s' finished: returned", "None: payload['exception'] = exception publisher_id = CFG.worker.publisher_id priority = notifier.DEBUG", "key=key) connection = tempo_queue.get_connection() consumer = kombu.Consumer(connection.channel(), kombu_queue) consumer.register_callback(_process_message) consumer.consume()", "'%(task_uuid)s' errored: %(e)s\" % locals()) _notify('Errored Task', exception=e) else: logger.debug(\"task", "but this works well # as a way of backgrounding", "common_exception.NotFound: logger.error(\"Task '%(task_uuid)s' not found\" % locals()) return _perform_task(task) def", "connection.drain_events() def consume_messages(exchange, queue, key): if CFG.worker.daemonized: # TODO(mdietz): there's", "locals()) _notify('Finished Task') def _process_message(body, message): message.ack() task_uuid = body['task_uuid']", "use this file except in compliance with the License. #", "default=False, help='Run worker as a daemon'), cfg.StrOpt('publisher_id', default='host', help='Where the", "exception publisher_id = CFG.worker.publisher_id priority = notifier.DEBUG notifier.notify(publisher_id, event_type, priority,", "'%(task_uuid)s' started: '%(action)s'\" % locals()) _notify('Started Task') try: func(task) except", "Exception as e: logger.error(\"task '%(task_uuid)s' errored: %(e)s\" % locals()) _notify('Errored", "tempo import notifier from tempo import queue as tempo_queue from", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "key): if CFG.worker.daemonized: # TODO(mdietz): there's a cleaner way to", "cfg from tempo.openstack.common import exception as common_exception CFG = config.CFG", "License. 
# You may obtain a copy of the License", "= {'task_uuid': task_uuid} if exception is not None: payload['exception'] =", "'%(task_uuid)s'\" % locals()) return logger.debug(\"task '%(task_uuid)s' started: '%(action)s'\" % locals())", "works well # as a way of backgrounding the server", "= body['task_uuid'] try: task = db.task_get(task_uuid) except common_exception.NotFound: logger.error(\"Task '%(task_uuid)s'", "under the License is distributed on an \"AS IS\" BASIS,", "License for the specific language governing permissions and # limitations", "payload['exception'] = exception publisher_id = CFG.worker.publisher_id priority = notifier.DEBUG notifier.notify(publisher_id,", "Reserved. # # Licensed under the Apache License, Version 2.0", "while True: connection.drain_events() def consume_messages(exchange, queue, key): if CFG.worker.daemonized: #", "db from tempo import notifier from tempo import queue as", "task.uuid try: func = getattr(actions, action) except AttributeError: logger.error(\"unrecognized action", "_process_message(body, message): message.ack() task_uuid = body['task_uuid'] try: task = db.task_get(task_uuid)", "# limitations under the License. import logging import kombu from", "'%(task_uuid)s' finished: returned successfully\" % locals()) _notify('Finished Task') def _process_message(body,", "_perform_task(task) def _consume_messages(exchange, queue, key): kombu_xchg = kombu.Exchange(exchange, 'direct', durable=True)", "from tempo import actions from tempo import config from tempo", "in compliance with the License. # You may obtain a", "tempo import config from tempo import db from tempo import", "locals()) _notify('Errored Task', exception=e) else: logger.debug(\"task '%(task_uuid)s' finished: returned successfully\"", "software # distributed under the License is distributed on an", "consumer.consume() while True: connection.drain_events() def consume_messages(exchange, queue, key): if CFG.worker.daemonized:", "= cfg.OptGroup(name='worker', title='Worker options') CFG.register_group(worker_group) CFG.register_opts(worker_opts, group=worker_group) def _perform_task(task): def", "priority, payload) action = task.action task_uuid = task.uuid try: func", "as e: logger.error(\"task '%(task_uuid)s' errored: %(e)s\" % locals()) _notify('Errored Task',", "_perform_task(task): def _notify(event_type, exception=None): payload = {'task_uuid': task_uuid} if exception", "softtabstop=4 # # Copyright 2012 Rackspace # All Rights Reserved.", "AttributeError: logger.error(\"unrecognized action '%(action)s' for task task\" \" '%(task_uuid)s'\" %", "body['task_uuid'] try: task = db.task_get(task_uuid) except common_exception.NotFound: logger.error(\"Task '%(task_uuid)s' not", "worker_opts = [ cfg.BoolOpt('daemonized', default=False, help='Run worker as a daemon'),", "func = getattr(actions, action) except AttributeError: logger.error(\"unrecognized action '%(action)s' for", "as tempo_queue from tempo.openstack.common import cfg from tempo.openstack.common import exception", "task_uuid} if exception is not None: payload['exception'] = exception publisher_id", "OF ANY KIND, either express or implied. 
# See the", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "tempo.openstack.common import cfg from tempo.openstack.common import exception as common_exception CFG", "daemon'), cfg.StrOpt('publisher_id', default='host', help='Where the notification came from') ] worker_group", "queue, key): kombu_xchg = kombu.Exchange(exchange, 'direct', durable=True) kombu_queue = kombu.Queue(queue,", "ANY KIND, either express or implied. # See the License", "See the License for the specific language governing permissions and", "import config from tempo import db from tempo import notifier", "the License. # You may obtain a copy of the", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "for the specific language governing permissions and # limitations under", "getattr(actions, action) except AttributeError: logger.error(\"unrecognized action '%(action)s' for task task\"", "to in writing, software # distributed under the License is", "# See the License for the specific language governing permissions", "= task.uuid try: func = getattr(actions, action) except AttributeError: logger.error(\"unrecognized", "connection = tempo_queue.get_connection() consumer = kombu.Consumer(connection.channel(), kombu_queue) consumer.register_callback(_process_message) consumer.consume() while", "logging.getLogger('tempo.worker') worker_opts = [ cfg.BoolOpt('daemonized', default=False, help='Run worker as a", "or agreed to in writing, software # distributed under the", "under the License. import logging import kombu from tempo import", "required by applicable law or agreed to in writing, software", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "with the License. # You may obtain a copy of", "default='host', help='Where the notification came from') ] worker_group = cfg.OptGroup(name='worker',", "actions from tempo import config from tempo import db from", "% locals()) _notify('Finished Task') def _process_message(body, message): message.ack() task_uuid =", "# vim: tabstop=4 shiftwidth=4 softtabstop=4 # # Copyright 2012 Rackspace", "compliance with the License. # You may obtain a copy", "All Rights Reserved. # # Licensed under the Apache License,", "agreed to in writing, software # distributed under the License", "from tempo.openstack.common import exception as common_exception CFG = config.CFG logger", "limitations under the License. import logging import kombu from tempo", "distributed under the License is distributed on an \"AS IS\"", "_notify(event_type, exception=None): payload = {'task_uuid': task_uuid} if exception is not", "%(e)s\" % locals()) _notify('Errored Task', exception=e) else: logger.debug(\"task '%(task_uuid)s' finished:", "the server for now import daemon with daemon.DaemonContext(): _consume_messages(exchange, queue,", "express or implied. # See the License for the specific", "except in compliance with the License. 
# You may obtain", "finished: returned successfully\" % locals()) _notify('Finished Task') def _process_message(body, message):", "Task') try: func(task) except Exception as e: logger.error(\"task '%(task_uuid)s' errored:", "of backgrounding the server for now import daemon with daemon.DaemonContext():", "Licensed under the Apache License, Version 2.0 (the \"License\"); #", "not use this file except in compliance with the License.", "= tempo_queue.get_connection() consumer = kombu.Consumer(connection.channel(), kombu_queue) consumer.register_callback(_process_message) consumer.consume() while True:", "= kombu.Consumer(connection.channel(), kombu_queue) consumer.register_callback(_process_message) consumer.consume() while True: connection.drain_events() def consume_messages(exchange,", "exception as common_exception CFG = config.CFG logger = logging.getLogger('tempo.worker') worker_opts", "writing, software # distributed under the License is distributed on", "you may not use this file except in compliance with", "# Licensed under the Apache License, Version 2.0 (the \"License\");", "% locals()) return logger.debug(\"task '%(task_uuid)s' started: '%(action)s'\" % locals()) _notify('Started", "now import daemon with daemon.DaemonContext(): _consume_messages(exchange, queue, key) else: _consume_messages(exchange,", "for now import daemon with daemon.DaemonContext(): _consume_messages(exchange, queue, key) else:", "CFG.worker.daemonized: # TODO(mdietz): there's a cleaner way to do this,", "common_exception CFG = config.CFG logger = logging.getLogger('tempo.worker') worker_opts = [", "CONDITIONS OF ANY KIND, either express or implied. # See", "from tempo import notifier from tempo import queue as tempo_queue", "cleaner way to do this, but this works well #", "def consume_messages(exchange, queue, key): if CFG.worker.daemonized: # TODO(mdietz): there's a", "publisher_id = CFG.worker.publisher_id priority = notifier.DEBUG notifier.notify(publisher_id, event_type, priority, payload)", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "db.task_get(task_uuid) except common_exception.NotFound: logger.error(\"Task '%(task_uuid)s' not found\" % locals()) return", "_notify('Finished Task') def _process_message(body, message): message.ack() task_uuid = body['task_uuid'] try:", "queue as tempo_queue from tempo.openstack.common import cfg from tempo.openstack.common import", "exchange=kombu_xchg, key=key) connection = tempo_queue.get_connection() consumer = kombu.Consumer(connection.channel(), kombu_queue) consumer.register_callback(_process_message)", "do this, but this works well # as a way", "as common_exception CFG = config.CFG logger = logging.getLogger('tempo.worker') worker_opts =", "shiftwidth=4 softtabstop=4 # # Copyright 2012 Rackspace # All Rights", "= getattr(actions, action) except AttributeError: logger.error(\"unrecognized action '%(action)s' for task", "and # limitations under the License. 
import logging import kombu", "{'task_uuid': task_uuid} if exception is not None: payload['exception'] = exception", "else: logger.debug(\"task '%(task_uuid)s' finished: returned successfully\" % locals()) _notify('Finished Task')", "True: connection.drain_events() def consume_messages(exchange, queue, key): if CFG.worker.daemonized: # TODO(mdietz):", "try: task = db.task_get(task_uuid) except common_exception.NotFound: logger.error(\"Task '%(task_uuid)s' not found\"", "title='Worker options') CFG.register_group(worker_group) CFG.register_opts(worker_opts, group=worker_group) def _perform_task(task): def _notify(event_type, exception=None):", "started: '%(action)s'\" % locals()) _notify('Started Task') try: func(task) except Exception", "OR CONDITIONS OF ANY KIND, either express or implied. #", "payload = {'task_uuid': task_uuid} if exception is not None: payload['exception']", "as a way of backgrounding the server for now import", "a way of backgrounding the server for now import daemon", "the License is distributed on an \"AS IS\" BASIS, #", "def _process_message(body, message): message.ack() task_uuid = body['task_uuid'] try: task =", "governing permissions and # limitations under the License. import logging", "import kombu from tempo import actions from tempo import config", "def _notify(event_type, exception=None): payload = {'task_uuid': task_uuid} if exception is", "worker as a daemon'), cfg.StrOpt('publisher_id', default='host', help='Where the notification came", "config from tempo import db from tempo import notifier from", "law or agreed to in writing, software # distributed under", "found\" % locals()) return _perform_task(task) def _consume_messages(exchange, queue, key): kombu_xchg", "= kombu.Exchange(exchange, 'direct', durable=True) kombu_queue = kombu.Queue(queue, exchange=kombu_xchg, key=key) connection", "logger.error(\"task '%(task_uuid)s' errored: %(e)s\" % locals()) _notify('Errored Task', exception=e) else:", "= [ cfg.BoolOpt('daemonized', default=False, help='Run worker as a daemon'), cfg.StrOpt('publisher_id',", "import logging import kombu from tempo import actions from tempo", "'%(task_uuid)s' not found\" % locals()) return _perform_task(task) def _consume_messages(exchange, queue,", "] worker_group = cfg.OptGroup(name='worker', title='Worker options') CFG.register_group(worker_group) CFG.register_opts(worker_opts, group=worker_group) def", "may obtain a copy of the License at # #", "to do this, but this works well # as a", "# TODO(mdietz): there's a cleaner way to do this, but", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "may not use this file except in compliance with the", "<gh_stars>1-10 # vim: tabstop=4 shiftwidth=4 softtabstop=4 # # Copyright 2012", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "_notify('Errored Task', exception=e) else: logger.debug(\"task '%(task_uuid)s' finished: returned successfully\" %", "this file except in compliance with the License. # You", "logger.error(\"unrecognized action '%(action)s' for task task\" \" '%(task_uuid)s'\" % locals())", "there's a cleaner way to do this, but this works", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "# # Licensed under the Apache License, Version 2.0 (the", "file except in compliance with the License. 
# You may", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "way to do this, but this works well # as", "func(task) except Exception as e: logger.error(\"task '%(task_uuid)s' errored: %(e)s\" %", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "% locals()) _notify('Started Task') try: func(task) except Exception as e:", "return _perform_task(task) def _consume_messages(exchange, queue, key): kombu_xchg = kombu.Exchange(exchange, 'direct',", "from tempo.openstack.common import cfg from tempo.openstack.common import exception as common_exception", "task = db.task_get(task_uuid) except common_exception.NotFound: logger.error(\"Task '%(task_uuid)s' not found\" %", "kombu_queue) consumer.register_callback(_process_message) consumer.consume() while True: connection.drain_events() def consume_messages(exchange, queue, key):", "= config.CFG logger = logging.getLogger('tempo.worker') worker_opts = [ cfg.BoolOpt('daemonized', default=False,", "try: func(task) except Exception as e: logger.error(\"task '%(task_uuid)s' errored: %(e)s\"", "language governing permissions and # limitations under the License. import", "exception=e) else: logger.debug(\"task '%(task_uuid)s' finished: returned successfully\" % locals()) _notify('Finished", "# Copyright 2012 Rackspace # All Rights Reserved. # #", "from tempo import config from tempo import db from tempo", "task task\" \" '%(task_uuid)s'\" % locals()) return logger.debug(\"task '%(task_uuid)s' started:", "TODO(mdietz): there's a cleaner way to do this, but this", "exception is not None: payload['exception'] = exception publisher_id = CFG.worker.publisher_id", "logger = logging.getLogger('tempo.worker') worker_opts = [ cfg.BoolOpt('daemonized', default=False, help='Run worker", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "came from') ] worker_group = cfg.OptGroup(name='worker', title='Worker options') CFG.register_group(worker_group) CFG.register_opts(worker_opts,", "= task.action task_uuid = task.uuid try: func = getattr(actions, action)", "or implied. # See the License for the specific language", "Rights Reserved. # # Licensed under the Apache License, Version", "kombu.Exchange(exchange, 'direct', durable=True) kombu_queue = kombu.Queue(queue, exchange=kombu_xchg, key=key) connection =", "queue, key): if CFG.worker.daemonized: # TODO(mdietz): there's a cleaner way", "KIND, either express or implied. 
# See the License for", "specific language governing permissions and # limitations under the License.", "options') CFG.register_group(worker_group) CFG.register_opts(worker_opts, group=worker_group) def _perform_task(task): def _notify(event_type, exception=None): payload", "this, but this works well # as a way of", "not found\" % locals()) return _perform_task(task) def _consume_messages(exchange, queue, key):", "task_uuid = task.uuid try: func = getattr(actions, action) except AttributeError:", "import daemon with daemon.DaemonContext(): _consume_messages(exchange, queue, key) else: _consume_messages(exchange, queue,", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "import cfg from tempo.openstack.common import exception as common_exception CFG =", "= kombu.Queue(queue, exchange=kombu_xchg, key=key) connection = tempo_queue.get_connection() consumer = kombu.Consumer(connection.channel(),", "def _perform_task(task): def _notify(event_type, exception=None): payload = {'task_uuid': task_uuid} if", "event_type, priority, payload) action = task.action task_uuid = task.uuid try:", "(the \"License\"); # you may not use this file except", "successfully\" % locals()) _notify('Finished Task') def _process_message(body, message): message.ack() task_uuid", "# you may not use this file except in compliance", "if exception is not None: payload['exception'] = exception publisher_id =", "License. import logging import kombu from tempo import actions from", "consume_messages(exchange, queue, key): if CFG.worker.daemonized: # TODO(mdietz): there's a cleaner", "from tempo import queue as tempo_queue from tempo.openstack.common import cfg", "message.ack() task_uuid = body['task_uuid'] try: task = db.task_get(task_uuid) except common_exception.NotFound:", "# # Unless required by applicable law or agreed to", "\" '%(task_uuid)s'\" % locals()) return logger.debug(\"task '%(task_uuid)s' started: '%(action)s'\" %", "errored: %(e)s\" % locals()) _notify('Errored Task', exception=e) else: logger.debug(\"task '%(task_uuid)s'", "Rackspace # All Rights Reserved. # # Licensed under the", "def _consume_messages(exchange, queue, key): kombu_xchg = kombu.Exchange(exchange, 'direct', durable=True) kombu_queue", "# # Copyright 2012 Rackspace # All Rights Reserved. #", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "help='Where the notification came from') ] worker_group = cfg.OptGroup(name='worker', title='Worker", "CFG.register_group(worker_group) CFG.register_opts(worker_opts, group=worker_group) def _perform_task(task): def _notify(event_type, exception=None): payload =", "cfg.BoolOpt('daemonized', default=False, help='Run worker as a daemon'), cfg.StrOpt('publisher_id', default='host', help='Where", "Version 2.0 (the \"License\"); # you may not use this", "Task', exception=e) else: logger.debug(\"task '%(task_uuid)s' finished: returned successfully\" % locals())", "logging import kombu from tempo import actions from tempo import", "# as a way of backgrounding the server for now", "notifier.notify(publisher_id, event_type, priority, payload) action = task.action task_uuid = task.uuid", "consumer = kombu.Consumer(connection.channel(), kombu_queue) consumer.register_callback(_process_message) consumer.consume() while True: connection.drain_events() def", "tempo import db from tempo import notifier from tempo import", "not None: payload['exception'] = exception publisher_id = CFG.worker.publisher_id priority =", "implied. 
# See the License for the specific language governing", "under the Apache License, Version 2.0 (the \"License\"); # you", "logger.debug(\"task '%(task_uuid)s' finished: returned successfully\" % locals()) _notify('Finished Task') def", "return logger.debug(\"task '%(task_uuid)s' started: '%(action)s'\" % locals()) _notify('Started Task') try:", "kombu from tempo import actions from tempo import config from", "Task') def _process_message(body, message): message.ack() task_uuid = body['task_uuid'] try: task", "by applicable law or agreed to in writing, software #", "tempo import queue as tempo_queue from tempo.openstack.common import cfg from", "for task task\" \" '%(task_uuid)s'\" % locals()) return logger.debug(\"task '%(task_uuid)s'", "import exception as common_exception CFG = config.CFG logger = logging.getLogger('tempo.worker')", "tempo_queue from tempo.openstack.common import cfg from tempo.openstack.common import exception as", "action) except AttributeError: logger.error(\"unrecognized action '%(action)s' for task task\" \"", "e: logger.error(\"task '%(task_uuid)s' errored: %(e)s\" % locals()) _notify('Errored Task', exception=e)", "well # as a way of backgrounding the server for", "kombu_queue = kombu.Queue(queue, exchange=kombu_xchg, key=key) connection = tempo_queue.get_connection() consumer =", "CFG = config.CFG logger = logging.getLogger('tempo.worker') worker_opts = [ cfg.BoolOpt('daemonized',", "returned successfully\" % locals()) _notify('Finished Task') def _process_message(body, message): message.ack()", "backgrounding the server for now import daemon with daemon.DaemonContext(): _consume_messages(exchange,", "CFG.worker.publisher_id priority = notifier.DEBUG notifier.notify(publisher_id, event_type, priority, payload) action =", "# All Rights Reserved. # # Licensed under the Apache", "2012 Rackspace # All Rights Reserved. # # Licensed under", "Copyright 2012 Rackspace # All Rights Reserved. 
# # Licensed", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "config.CFG logger = logging.getLogger('tempo.worker') worker_opts = [ cfg.BoolOpt('daemonized', default=False, help='Run", "Unless required by applicable law or agreed to in writing,", "tempo import actions from tempo import config from tempo import", "a daemon'), cfg.StrOpt('publisher_id', default='host', help='Where the notification came from') ]", "action = task.action task_uuid = task.uuid try: func = getattr(actions,", "the specific language governing permissions and # limitations under the", "except common_exception.NotFound: logger.error(\"Task '%(task_uuid)s' not found\" % locals()) return _perform_task(task)", "task_uuid = body['task_uuid'] try: task = db.task_get(task_uuid) except common_exception.NotFound: logger.error(\"Task", "[ cfg.BoolOpt('daemonized', default=False, help='Run worker as a daemon'), cfg.StrOpt('publisher_id', default='host',", "applicable law or agreed to in writing, software # distributed", "priority = notifier.DEBUG notifier.notify(publisher_id, event_type, priority, payload) action = task.action", "is not None: payload['exception'] = exception publisher_id = CFG.worker.publisher_id priority", "task\" \" '%(task_uuid)s'\" % locals()) return logger.debug(\"task '%(task_uuid)s' started: '%(action)s'\"", "tabstop=4 shiftwidth=4 softtabstop=4 # # Copyright 2012 Rackspace # All", "in writing, software # distributed under the License is distributed", "durable=True) kombu_queue = kombu.Queue(queue, exchange=kombu_xchg, key=key) connection = tempo_queue.get_connection() consumer", "'%(action)s' for task task\" \" '%(task_uuid)s'\" % locals()) return logger.debug(\"task", "vim: tabstop=4 shiftwidth=4 softtabstop=4 # # Copyright 2012 Rackspace #", "task.action task_uuid = task.uuid try: func = getattr(actions, action) except", "server for now import daemon with daemon.DaemonContext(): _consume_messages(exchange, queue, key)", "way of backgrounding the server for now import daemon with", "key): kombu_xchg = kombu.Exchange(exchange, 'direct', durable=True) kombu_queue = kombu.Queue(queue, exchange=kombu_xchg,", "License is distributed on an \"AS IS\" BASIS, # WITHOUT", "if CFG.worker.daemonized: # TODO(mdietz): there's a cleaner way to do", "License, Version 2.0 (the \"License\"); # you may not use", "notifier.DEBUG notifier.notify(publisher_id, event_type, priority, payload) action = task.action task_uuid =", "group=worker_group) def _perform_task(task): def _notify(event_type, exception=None): payload = {'task_uuid': task_uuid}", "tempo.openstack.common import exception as common_exception CFG = config.CFG logger =", "# You may obtain a copy of the License at", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "locals()) _notify('Started Task') try: func(task) except Exception as e: logger.error(\"task", "'direct', durable=True) kombu_queue = kombu.Queue(queue, exchange=kombu_xchg, key=key) connection = tempo_queue.get_connection()", "cfg.StrOpt('publisher_id', default='host', help='Where the notification came from') ] worker_group =", "daemon with daemon.DaemonContext(): _consume_messages(exchange, queue, key) else: _consume_messages(exchange, queue, key)", "the License for the specific language governing permissions and #", "kombu.Consumer(connection.channel(), kombu_queue) consumer.register_callback(_process_message) consumer.consume() while True: connection.drain_events() def consume_messages(exchange, queue,", "Apache License, Version 2.0 (the \"License\"); # 
you may not", "either express or implied. # See the License for the", "logger.error(\"Task '%(task_uuid)s' not found\" % locals()) return _perform_task(task) def _consume_messages(exchange,", "from tempo import db from tempo import notifier from tempo", "kombu.Queue(queue, exchange=kombu_xchg, key=key) connection = tempo_queue.get_connection() consumer = kombu.Consumer(connection.channel(), kombu_queue)", "notifier from tempo import queue as tempo_queue from tempo.openstack.common import", "tempo_queue.get_connection() consumer = kombu.Consumer(connection.channel(), kombu_queue) consumer.register_callback(_process_message) consumer.consume() while True: connection.drain_events()", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "help='Run worker as a daemon'), cfg.StrOpt('publisher_id', default='host', help='Where the notification", "= CFG.worker.publisher_id priority = notifier.DEBUG notifier.notify(publisher_id, event_type, priority, payload) action", "payload) action = task.action task_uuid = task.uuid try: func =", "the notification came from') ] worker_group = cfg.OptGroup(name='worker', title='Worker options')", "import notifier from tempo import queue as tempo_queue from tempo.openstack.common", "worker_group = cfg.OptGroup(name='worker', title='Worker options') CFG.register_group(worker_group) CFG.register_opts(worker_opts, group=worker_group) def _perform_task(task):", "_notify('Started Task') try: func(task) except Exception as e: logger.error(\"task '%(task_uuid)s'", "% locals()) return _perform_task(task) def _consume_messages(exchange, queue, key): kombu_xchg =", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "CFG.register_opts(worker_opts, group=worker_group) def _perform_task(task): def _notify(event_type, exception=None): payload = {'task_uuid':", "permissions and # limitations under the License. 
import logging import", "try: func = getattr(actions, action) except AttributeError: logger.error(\"unrecognized action '%(action)s'", "except AttributeError: logger.error(\"unrecognized action '%(action)s' for task task\" \" '%(task_uuid)s'\"", "this works well # as a way of backgrounding the", "\"License\"); # you may not use this file except in", "exception=None): payload = {'task_uuid': task_uuid} if exception is not None:", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "# distributed under the License is distributed on an \"AS", "locals()) return logger.debug(\"task '%(task_uuid)s' started: '%(action)s'\" % locals()) _notify('Started Task')", "= db.task_get(task_uuid) except common_exception.NotFound: logger.error(\"Task '%(task_uuid)s' not found\" % locals())", "# Unless required by applicable law or agreed to in", "import queue as tempo_queue from tempo.openstack.common import cfg from tempo.openstack.common", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "import actions from tempo import config from tempo import db", "logger.debug(\"task '%(task_uuid)s' started: '%(action)s'\" % locals()) _notify('Started Task') try: func(task)", "'%(action)s'\" % locals()) _notify('Started Task') try: func(task) except Exception as", "message): message.ack() task_uuid = body['task_uuid'] try: task = db.task_get(task_uuid) except", "_consume_messages(exchange, queue, key): kombu_xchg = kombu.Exchange(exchange, 'direct', durable=True) kombu_queue =", "except Exception as e: logger.error(\"task '%(task_uuid)s' errored: %(e)s\" % locals())", "You may obtain a copy of the License at #", "= logging.getLogger('tempo.worker') worker_opts = [ cfg.BoolOpt('daemonized', default=False, help='Run worker as", "= notifier.DEBUG notifier.notify(publisher_id, event_type, priority, payload) action = task.action task_uuid", "action '%(action)s' for task task\" \" '%(task_uuid)s'\" % locals()) return", "consumer.register_callback(_process_message) consumer.consume() while True: connection.drain_events() def consume_messages(exchange, queue, key): if", "the Apache License, Version 2.0 (the \"License\"); # you may", "a cleaner way to do this, but this works well" ]
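# ----------------------------------------------------------------------------
# Usage sketch (added for illustration; not part of the original module).
# The worker consumes message bodies shaped like {'task_uuid': ...}; a
# matching publisher might look like this. The broker URL and the
# exchange/queue/routing-key names are assumptions, not values from the code.
import kombu

connection = kombu.BrokerConnection('amqp://guest:guest@localhost:5672//')
exchange = kombu.Exchange('tempo', 'direct', durable=True)
producer = kombu.Producer(connection.channel(), exchange=exchange,
                          routing_key='tempo.tasks')
producer.publish({'task_uuid': 'example-task-uuid'})

# The consuming side would then run, with the same (assumed) names:
#   consume_messages('tempo', 'tempo.tasks', 'tempo.tasks')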
[ "t0)) # adjust for fourier job['fourier'] = 'train_out_imag' in data_open", "seq = line.rstrip() nsites += 1 if len(pwm_counts) == 0:", "left_pad # print fasta file of positive outputs filter_fasta_out =", "NO --fineprint \"\"' weblogo_opts += ' -C \"#CB2026\" A A'", "p = spearmanr(filter_outs_seq[:, fi], seq_targets[:num_seqs, ti]) filter_target_cors[fi, ti] = cor", "return motif_protein def info_content(pwm, transpose=False, bg_gc=0.415): \"\"\" Compute PWM information", "'-m', dest='meme_db', default='%s/data/motifs/Homo_sapiens.meme' % os.environ['BASENJIDIR'], help='MEME database used to annotate", "2.0 (the \"License\"); # you may not use this file", "os.path.isdir(options.out_dir): os.mkdir(options.out_dir) ################################################################# # load data data_open = h5py.File(data_file) test_seqs1", "compute filter output means per sequence filter_seqs = filter_outs.mean(axis=2) #", "> 0: s += 1 print('%d segments of length %d'", "# count nt_counts = [1] * 4 for i in", "content filters_ic.append(info_content(filter_pwm)) # add to the meme motif file meme_add(meme_out,", "vmin=-param_range, vmax=param_range) ax = plt.gca() ax.set_xticklabels(range(1, param_matrix.shape[1] + 1)) ax.set_yticklabels('TGCA',", "np.max( param_matrix[:, trim_start]) - np.min( param_matrix[:, trim_start]) < trim_t: trim_start", "# Mean doesn't work well for the smaller segments for", "fstart = 0 # add primary sequence kmer += seqs[i][fstart:fend]", "np.zeros((len(filter_names_live), num_targets)) for fi in range(len(filter_names_live)): for ti in range(num_targets):", "# make weblogo if filter_count > 0: weblogo_cmd = 'weblogo", "'_%s' % motif_protein[top_motif] return np.array(filter_names) ################################################################################ # plot_target_corr # #", ": \"\"\" # name by number filter_names = ['f%d' %", "file \"\"\" motif_protein = {} for line in open(meme_db_file): a", "to the stats table. 
# # Input # param_matrix: np.array", "< ic_end: print('MOTIF filter%d' % f, file=meme_out) print( 'letter-probability matrix:", "s += 1 print('%d segments of length %d' % (s,", "out_pdf: ################################################################################ def plot_score_density(f_scores, out_pdf): sns.set(font_scale=1.3) plt.figure() sns.distplot(f_scores, kde=False) plt.xlabel('ReLU", "of filter output scores fmean, fstd = plot_score_density( np.ravel(filter_outs[:, :,", "'MA %s' % ' '.join(['%.2f' % (mult * n) for", "prefix ic_start = 0 while ic_start < filter_pwm.shape[0] and info_content(", "== 0: # initialize with the length for i in", "'%s/filter_seqs.pdf' % options.out_dir) # plot filter-segment heatmap plot_filter_seg_heat(filter_outs, '%s/filter_segs.pdf' %", "choose sampled indexes sample_i = sorted(random.sample(range(test_seqs1.shape[0]), options.sample)) # filter test_seqs1", "= 0.3 if trim_filters: # trim PWM of uninformative prefix", "range(4)]) return np.array(pwm_freqs), nsites - 4 def meme_add(meme_out, f, filter_pwm,", "using the MEME DB file \"\"\" motif_protein = {} for", "= name_pieces[1] # plot density of filter output scores fmean,", "all_outs = np.ravel(filter_outs) all_outs_mean = all_outs.mean() all_outs_norm = all_outs -", "of the License at # https://www.apache.org/licenses/LICENSE-2.0 # Unless required by", "seqs_i], 0.1) hmax = np.percentile(filter_seqs[:, seqs_i], 99.9) sns.set(font_scale=0.3) if whiten:", "weblogo_cmd = 'weblogo %s < %s.fa > %s.eps' % (weblogo_opts,", "0, 'C': 1, 'G': 2, 'T': 3} pwm_counts = []", ": filter PWM array nsites (int) : number of filter", "= filter_outs_seg.max(axis=3) # break each segment into a new instance", "# grab annotation annotation = '.' name_pieces = filter_names[f].split('_') if", "file f (int) : filter index # filter_pwm (array) :", "the stats table. 
# # Input # param_matrix: np.array of", "ends [Default: %default]' ) (options, args) = parser.parse_args() if len(args)", "filter_seqs[filter_stds > 0] # downsample sequences seqs_i = np.random.randint(0, filter_seqs.shape[1],", "plot_filter_seg_heat( filter_outs, '%s/filter_segs_raw.pdf' % options.out_dir, whiten=False) # plot filter-target correlation", "else: motif_protein[a[1]] = a[2] return motif_protein def info_content(pwm, transpose=False, bg_gc=0.415):", "range(4): # ic += 0.5 + pwm[i][j]*np.log2(pseudoc+pwm[i][j]) ic += -bg_pwm[j]", "ic_start < filter_pwm.shape[0] and info_content( filter_pwm[ic_start:ic_start + 1]) < ic_t:", "f), maxpct_t=options.act_t) # make a PWM for the filter filter_pwm,", "database used to annotate motifs') parser.add_option( '-p', dest='plot_heats', default=False, action='store_true',", "filter_size, test_seqs, '%s/filter%d_logo' % (options.out_dir, f), maxpct_t=options.act_t) # make a", "info_content( filter_pwm[ic_start:ic_start + 1]) < ic_t: ic_start += 1 #", "= pd.DataFrame( filter_target_cors, index=filter_names_live, columns=target_names) sns.set(font_scale=0.3) plt.figure() sns.clustermap(cor_df, cmap='BrBG', center=0,", "'std') print('%3s %19s %10s %5s %6s %6s' % header_cols, file=table_out)", "q-value's by filter filter_motifs = {} tt_in = open(tomtom_file) tt_in.readline()", "# # Mean doesn't work well for the smaller segments", "obtain a copy of the License at # https://www.apache.org/licenses/LICENSE-2.0 #", "seq_targets, filter_names, target_names, out_pdf, seq_op='mean'): num_seqs = filter_outs.shape[0] num_targets =", "# filter test_seqs1 = test_seqs1[sample_i] test_targets = test_targets[sample_i] # convert", "License for the specific language governing permissions and # limitations", "# out_pdf: ################################################################################ def filter_possum(param_matrix, motif_id, possum_file, trim_filters=False, mult=200): #", "filter_seqs = preprocessing.scale(filter_seqs) # transpose filter_seqs = np.transpose(filter_seqs) if drop_dead:", "right_pad = filter_size - left_pad # print fasta file of", "for ci in range(trim_start, trim_end + 1): print( 'MA %s'", ") parser.add_option( '-d', dest='model_hdf5_file', default=None, help='Pre-computed model output as HDF5.')", "trim trim_start = 0 trim_end = param_matrix.shape[1] - 1 trim_t", "plt.figure() sns.clustermap(cor_df, cmap='BrBG', center=0, figsize=(8, 10)) plt.savefig(out_pdf) plt.close() ################################################################################ #", "data_open['test_out_imag'] if options.valid: test_targets_imag = data_open['valid_out_imag'] ################################################################# # predict #", "sites \"\"\" if not trim_filters: ic_start = 0 ic_end =", "%.4f C %.4f G %.4f T %.4f' % tuple(nt_freqs), file=meme_out)", "MEME motif format file and print intro Attrs: meme_file (str)", "parser.add_option( '-a', dest='act_t', default=0.5, type='float', help= 'Activation threshold (as proportion", ": filename of MEME db Returns: filter_names [str] : \"\"\"", "b, f)) # whiten if whiten: filter_seqs = preprocessing.scale(filter_seqs) #", "\"\"\" Compute PWM information content. 
In the original analysis, I", "v]: max_n = n if param_matrix[max_n, v] > 0: motif_list.append(nts[max_n])", "- t0)) # adjust for fourier job['fourier'] = 'train_out_imag' in", "test_seqs = basenji.dna_io.hot1_dna(test_seqs1) ################################################################# # model parameters and placeholders job", "print(filter_weights.shape) # test t0 = time.time() layer_filter_outs, _ = dr.hidden(sess,", "print header for later panda reading header_cols = ('', 'consensus',", "1, 'G': 2, 'T': 3} pwm_counts = [] nsites =", "print( 'letter-probability matrix: alength= 4 w= %d nsites= %d' %", "(trim_end + 1 - trim_start), file=possum_out) for ci in range(trim_start,", "filter_names, target_names, '%s/filter_target_cors_max.pdf' % options.out_dir, 'max') def get_motif_proteins(meme_db_file): \"\"\" Hash", "index # filter_pwm (array) : filter PWM array nsites (int)", "open MEME file \"\"\" nts = {'A': 0, 'C': 1,", "\"\"\" Name the filters using Tomtom matches. Attrs: num_filters (int)", "for later panda reading header_cols = ('', 'consensus', 'annotation', 'ic',", "the License. # ========================================================================= from __future__ import print_function from optparse", "seqs_i], 0.1) hmax = np.percentile(filter_seqs[:, seqs_i], 99.9) sns.set(font_scale=0.3) plt.figure() sns.clustermap(", "float(s) - (l / s) > 0: s += 1", "4)) sns.heatmap( param_matrix, cmap='PRGn', linewidths=0.2, vmin=-param_range, vmax=param_range) ax = plt.gca()", "n) for n in param_matrix[:, ci]]), file=possum_out) print('END', file=possum_out) print('END',", "not trim_filters: ic_start = 0 ic_end = filter_pwm.shape[0] - 1", "of high scoring outputs plot_filter_logo( filter_outs[:, :, f], filter_size, test_seqs,", "# filter_outs: # filter_names: # target_names: # out_pdf: ################################################################################ def", "layer_filter_outs[0] print(filter_outs.shape) # store useful variables num_filters = filter_weights.shape[0] filter_size", "basenji.dna_io.read_job_params(params_file) job['seq_length'] = test_seqs1.shape[1] job['seq_depth'] = test_seqs1.shape[2] job['num_targets'] = test_targets.shape[2]", "%s' % ' '.join(['%.2f' % (mult * n) for n", "# filter_possum # # Write a Possum-style motif # #", "'annotation', 'ic', 'mean', 'std') print('%3s %19s %10s %5s %6s %6s'", "quite as well as you # might expect. 
# #", "'%s/filter%d_heat.pdf' % (options.out_dir, f)) # write possum motif file filter_possum(filter_weights[f,", "filter_seqs_std > 0] filter_names_live = filter_names[filter_seqs_std > 0] filter_target_cors =", "test_seqs1.shape[2] job['num_targets'] = test_targets.shape[2] job['target_pool'] = int(np.array(data_open.get('pool_width', 1))) t0 =", "out_prefix, raw_t=0, maxpct_t=None): if maxpct_t: all_outs = np.ravel(filter_outs) all_outs_mean =", "#plt.savefig(out_png, dpi=300) plt.close() ################################################################################ # plot_filter_seq_heat # # Plot a", "# # Plot a clustered heatmap of filter activations in", "frequencies:', file=meme_out) print('A %.4f C %.4f G %.4f T %.4f'", "param_matrix[max_n, v]: max_n = n if param_matrix[max_n, v] > 0:", "s, l / s)) # mean across the segments filter_outs_mean", "as plt import numpy as np import pandas as pd", "info_content(filter_pwm[ic_end:ic_end + 1]) < ic_t: ic_end -= 1 if ic_start", "T' ################################################################################ # main ################################################################################ def main(): usage = 'usage:", "in range(ic_start, ic_end + 1): print('%.4f %.4f %.4f %.4f' %", "cor_df = pd.DataFrame( filter_target_cors, index=filter_names_live, columns=target_names) sns.set(font_scale=0.3) plt.figure() sns.clustermap(cor_df, cmap='BrBG',", "transpose: pwm = np.transpose(pwm) bg_pwm = [1 - bg_gc, bg_gc,", "filter plots ################################################################# # also save information contents filters_ic =", "convert to letters test_seqs = basenji.dna_io.hot1_dna(test_seqs1) ################################################################# # model parameters", "motif_protein = {} for line in open(meme_db_file): a = line.split()", "import copy, os, pdb, random, shutil, subprocess, time import h5py", "filter_target_cors = np.zeros((len(filter_names_live), num_targets)) for fi in range(len(filter_names_live)): for ti", "OF ANY KIND, either express or implied. 
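################################################################################
# Usage sketch (added; not part of the original script). A typical invocation,
# assuming a trained model checkpoint and HDF5 test data (paths hypothetical):
#
#   basenji_motifs.py -t -s 1000 -o motifs_out params.txt model.tf data.h5
#
# which writes per-filter heatmaps, logos, and a MEME-format motif file under
# motifs_out/, then annotates the filters against the MEME database via Tomtom.
################################################################################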
def get_motif_proteins(meme_db_file):
  """ Hash motif_id's to protein names using the MEME DB file """
  motif_protein = {}
  for line in open(meme_db_file):
    a = line.split()
    if len(a) > 0 and a[0] == 'MOTIF':
      if a[2][0] == '(':
        motif_protein[a[1]] = a[2][1:a[2].find(')')]
      else:
        motif_protein[a[1]] = a[2]
  return motif_protein


def info_content(pwm, transpose=False, bg_gc=0.415):
  """ Compute PWM information content.

    In the original analysis, I used a bg_gc=0.5. For any
    future analysis, I ought to switch to the true hg19
    value of 0.415.
    """
  pseudoc = 1e-9

  if transpose:
    pwm = np.transpose(pwm)

  bg_pwm = [1 - bg_gc, bg_gc, bg_gc, 1 - bg_gc]

  ic = 0
  for i in range(pwm.shape[0]):
    for j in range(4):
      # ic += 0.5 + pwm[i][j]*np.log2(pseudoc+pwm[i][j])
      ic += -bg_pwm[j] * np.log2(
          bg_pwm[j]) + pwm[i][j] * np.log2(pseudoc + pwm[i][j])

  return ic
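################################################################################
# Usage sketch (added; not in the original file): info_content on toy PWMs.
# With bg_gc=0.5 each column contributes 2 - H(column) bits, so a uniform
# column adds ~0 and a near-certain column approaches 2:
#
#   uniform = np.array([[0.25, 0.25, 0.25, 0.25]])
#   certain = np.array([[0.97, 0.01, 0.01, 0.01]])
#   info_content(uniform, bg_gc=0.5)   # ~0.0 bits
#   info_content(certain, bg_gc=0.5)   # ~1.8 bits
################################################################################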
def make_filter_pwm(filter_fasta):
  """ Make a PWM for this filter from its top hits """
  nts = {'A': 0, 'C': 1, 'G': 2, 'T': 3}
  pwm_counts = []
  nsites = 4  # pseudocounts
  for line in open(filter_fasta):
    if line[0] != '>':
      seq = line.rstrip()
      nsites += 1
      if len(pwm_counts) == 0:
        # initialize with the length
        for i in range(len(seq)):
          pwm_counts.append(np.array([1.] * 4))

      # count
      for i in range(len(seq)):
        try:
          pwm_counts[i][nts[seq[i]]] += 1
        except KeyError:
          pwm_counts[i] += np.array([0.25] * 4)

  # normalize
  pwm_freqs = []
  for i in range(len(pwm_counts)):
    pwm_freqs.append([pwm_counts[i][j] / float(nsites) for j in range(4)])

  return np.array(pwm_freqs), nsites - 4


def meme_add(meme_out, f, filter_pwm, nsites, trim_filters=False):
  """ Print a filter to the growing MEME file

    Attrs:
        meme_out : open file
        f (int) : filter index #
        filter_pwm (array) : filter PWM array
        nsites (int) : number of filter sites
    """
  if not trim_filters:
    ic_start = 0
    ic_end = filter_pwm.shape[0] - 1
  else:
    ic_t = 0.2

    # trim PWM of uninformative prefix
    ic_start = 0
    while ic_start < filter_pwm.shape[0] and info_content(
        filter_pwm[ic_start:ic_start + 1]) < ic_t:
      ic_start += 1

    # trim PWM of uninformative suffix
    ic_end = filter_pwm.shape[0] - 1
    while ic_end >= 0 and info_content(filter_pwm[ic_end:ic_end + 1]) < ic_t:
      ic_end -= 1

  if ic_start < ic_end:
    print('MOTIF filter%d' % f, file=meme_out)
    print(
        'letter-probability matrix: alength= 4 w= %d nsites= %d' %
        (ic_end - ic_start + 1, nsites),
        file=meme_out)

    for i in range(ic_start, ic_end + 1):
      print('%.4f %.4f %.4f %.4f' % tuple(filter_pwm[i]), file=meme_out)
    print('', file=meme_out)


def meme_intro(meme_file, seqs):
  """ Open MEME motif format file and print intro

    Attrs:
        meme_file (str) : filename
        seqs [str] : list of strings for obtaining background freqs

    Returns:
        mem_out : open MEME file
    """
  nts = {'A': 0, 'C': 1, 'G': 2, 'T': 3}

  # count
  nt_counts = [1] * 4
  for i in range(len(seqs)):
    for nt in seqs[i]:
      try:
        nt_counts[nts[nt]] += 1
      except KeyError:
        pass

  # normalize
  nt_sum = float(sum(nt_counts))
  nt_freqs = [nt_counts[i] / nt_sum for i in range(4)]

  # open file for writing
  meme_out = open(meme_file, 'w')

  # print intro material
  print('MEME version 4', file=meme_out)
  print('', file=meme_out)
  print('ALPHABET= ACGT', file=meme_out)
  print('', file=meme_out)
  print('Background letter frequencies:', file=meme_out)
  print('A %.4f C %.4f G %.4f T %.4f' % tuple(nt_freqs), file=meme_out)
  print('', file=meme_out)

  return meme_out


def name_filters(num_filters, tomtom_file, meme_db_file):
  """ Name the filters using Tomtom matches.

    Attrs:
        num_filters (int) : total number of filters
        tomtom_file (str) : filename of Tomtom output table
        meme_db_file (str) : filename of MEME db

    Returns:
        filter_names [str] :
    """
  # name by number
  filter_names = ['f%d' % fi for fi in range(num_filters)]

  # name by protein
  if tomtom_file is not None and meme_db_file is not None:
    motif_protein = get_motif_proteins(meme_db_file)

    # hash motifs and q-value's by filter
    filter_motifs = {}

    tt_in = open(tomtom_file)
    tt_in.readline()
    for line in tt_in:
      a = line.split()
      fi = int(a[0][6:])
      motif_id = a[1]
      qval = float(a[5])
      filter_motifs.setdefault(fi, []).append((qval, motif_id))
    tt_in.close()

    # assign filter's best match
    for fi in filter_motifs:
      top_motif = sorted(filter_motifs[fi])[0][1]
      filter_names[fi] += '_%s' % motif_protein[top_motif]

  return np.array(filter_names)


################################################################################
# plot_target_corr
#
# Plot a clustered heatmap of correlations between filter activations and
# targets.
#
# Input
#  filter_outs:
#  filter_names:
#  target_names:
#  out_pdf:
################################################################################
def plot_target_corr(filter_outs, seq_targets, filter_names, target_names,
                     out_pdf, seq_op='mean'):
  num_seqs = filter_outs.shape[0]
  num_targets = len(target_names)

  if seq_op == 'mean':
    filter_outs_seq = filter_outs.mean(axis=2)
  else:
    filter_outs_seq = filter_outs.max(axis=2)

  # std is sequence by filter.
  filter_seqs_std = filter_outs_seq.std(axis=0)
  filter_outs_seq = filter_outs_seq[:, filter_seqs_std > 0]
  filter_names_live = filter_names[filter_seqs_std > 0]

  filter_target_cors = np.zeros((len(filter_names_live), num_targets))
  for fi in range(len(filter_names_live)):
    for ti in range(num_targets):
      cor, p = spearmanr(filter_outs_seq[:, fi], seq_targets[:num_seqs, ti])
      filter_target_cors[fi, ti] = cor

  cor_df = pd.DataFrame(
      filter_target_cors, index=filter_names_live, columns=target_names)

  sns.set(font_scale=0.3)
  plt.figure()
  sns.clustermap(cor_df, cmap='BrBG', center=0, figsize=(8, 10))
  plt.savefig(out_pdf)
  #plt.savefig(out_png, dpi=300)
  plt.close()


################################################################################
# plot_filter_seq_heat
#
# Plot a clustered heatmap of filter activations in sequences.
#
# Input
#  filter_outs
################################################################################
def plot_filter_seq_heat(filter_outs, out_pdf, whiten=True, drop_dead=True):
  # compute filter output means per sequence
  filter_seqs = filter_outs.mean(axis=2)

  # whiten
  if whiten:
    filter_seqs = preprocessing.scale(filter_seqs)

  # transpose
  filter_seqs = np.transpose(filter_seqs)

  if drop_dead:
    filter_stds = filter_seqs.std(axis=1)
    filter_seqs = filter_seqs[filter_stds > 0]

  # downsample sequences
  seqs_i = np.random.randint(0, filter_seqs.shape[1], 500)

  hmin = np.percentile(filter_seqs[:, seqs_i], 0.1)
  hmax = np.percentile(filter_seqs[:, seqs_i], 99.9)

  sns.set(font_scale=0.3)

  plt.figure()
  sns.clustermap(
      filter_seqs[:, seqs_i],
      row_cluster=True,
      col_cluster=True,
      xticklabels=False,
      vmin=hmin,
      vmax=hmax)
  plt.savefig(out_pdf)
  plt.close()


################################################################################
# plot_filter_seg_heat
#
# Plot a clustered heatmap of filter activations in sequence segments.
#
# Mean doesn't work well for the smaller segments for some reason, but taking
# the max looks OK. Still, similar motifs don't cluster quite as well as you
# might expect.
#
# Input
#  filter_outs
################################################################################
def plot_filter_seg_heat(filter_outs, out_pdf, whiten=True, drop_dead=True):
  b = filter_outs.shape[0]
  f = filter_outs.shape[1]
  l = filter_outs.shape[2]

  s = 5
  while l / float(s) - (l / s) > 0:
    s += 1
  print('%d segments of length %d' % (s, l / s))

  # split into multiple segments
  filter_outs_seg = np.reshape(filter_outs, (b, f, s, l / s))

  # mean across the segments
  filter_outs_mean = filter_outs_seg.max(axis=3)

  # break each segment into a new instance
  filter_seqs = np.reshape(np.swapaxes(filter_outs_mean, 2, 1), (s * b, f))

  # whiten
  if whiten:
    filter_seqs = preprocessing.scale(filter_seqs)

  # transpose
  filter_seqs = np.transpose(filter_seqs)

  if drop_dead:
    filter_stds = filter_seqs.std(axis=1)
    filter_seqs = filter_seqs[filter_stds > 0]

  # downsample sequences
  seqs_i = np.random.randint(0, filter_seqs.shape[1], 500)

  hmin = np.percentile(filter_seqs[:, seqs_i], 0.1)
  hmax = np.percentile(filter_seqs[:, seqs_i], 99.9)

  sns.set(font_scale=0.3)
  if whiten:
    dist = 'euclidean'
  else:
    dist = 'cosine'

  plt.figure()
  sns.clustermap(
      filter_seqs[:, seqs_i],
      metric=dist,
      row_cluster=True,
      col_cluster=True,
      xticklabels=False,
      vmin=hmin,
      vmax=hmax)
  plt.savefig(out_pdf)
  out_png = out_pdf[:-2] + 'ng'
  #plt.savefig(out_png, dpi=300)
  plt.close()


################################################################################
# filter_motif
#
# Collapse the filter parameter matrix to a single DNA motif.
#
# Input
#  param_matrix: np.array of the filter's parameter matrix
#  out_pdf:
################################################################################
def filter_motif(param_matrix):
  nts = 'ACGT'

  motif_list = []
  for v in range(param_matrix.shape[1]):
    max_n = 0
    for n in range(1, 4):
      if param_matrix[n, v] > param_matrix[max_n, v]:
        max_n = n

    if param_matrix[max_n, v] > 0:
      motif_list.append(nts[max_n])
    else:
      motif_list.append('N')

  return ''.join(motif_list)
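################################################################################
# Usage sketch (added; not in the original file): filter_motif reduces a
# 4 x width weight matrix to one consensus letter per column, emitting 'N'
# wherever the strongest weight is non-positive:
#
#   w = np.array([[ 0.9, -0.2],   # A
#                 [-0.1, -0.3],   # C
#                 [ 0.2, -0.5],   # G
#                 [-0.4, -0.1]])  # T
#   filter_motif(w)                # -> 'AN'
################################################################################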
# You may", "filter_pwm, nsites, options.trim_filters) meme_out.close() ################################################################# # annotate filters ################################################################# #", "Input # filter_outs: # filter_names: # target_names: # out_pdf: ################################################################################", "score density and print to the stats table. # #", "0]) print(filter_weights.shape) # test t0 = time.time() layer_filter_outs, _ =", "model parameters and placeholders job = basenji.dna_io.read_job_params(params_file) job['seq_length'] = test_seqs1.shape[1]", "targets. # # Input # filter_outs: # filter_names: # target_names:", "= 0 trim_end = param_matrix.shape[1] - 1 trim_t = 0.3", "% (s, l / s)) # split into multiple segments", ": open file f (int) : filter index # filter_pwm", "filter_size, seqs, out_prefix, raw_t=0, maxpct_t=None): if maxpct_t: all_outs = np.ravel(filter_outs)", "max) to consider for PWM [Default: %default]' ) parser.add_option( '-d',", "fend = fstart + filter_size # if it starts in", "ought to switch to the true hg19 value of 0.415.", "filter_weights.shape[0] filter_size = filter_weights.shape[2] ################################################################# # individual filter plots #################################################################", "print('>%d_%d' % (i, j), file=filter_fasta_out) print(kmer, file=filter_fasta_out) filter_count += 1", "and placeholders job = basenji.dna_io.read_job_params(params_file) job['seq_length'] = test_seqs1.shape[1] job['seq_depth'] =", "'T': 3} pwm_counts = [] nsites = 4 # pseudocounts", "= test_targets.shape[2] job['target_pool'] = int(np.array(data_open.get('pool_width', 1))) t0 = time.time() dr", "n if param_matrix[max_n, v] > 0: motif_list.append(nts[max_n]) else: motif_list.append('N') return", "the filter's parameter matrix # out_pdf: ################################################################################ def plot_filter_heat(param_matrix, out_pdf):", "the given model using the given sequences. ''' weblogo_opts =", "'>': seq = line.rstrip() nsites += 1 if len(pwm_counts) ==", "by protein if tomtom_file is not None and meme_db_file is", "parameters and placeholders job = basenji.dna_io.read_job_params(params_file) job['seq_length'] = test_seqs1.shape[1] job['seq_depth']", "4)) # count for i in range(len(seq)): try: pwm_counts[i][nts[seq[i]]] +=", "i in range(len(seqs)): for nt in seqs[i]: try: nt_counts[nts[nt]] +=", "= parser.parse_args() if len(args) != 3: parser.error( 'Must provide Basenji", "batcher_test = basenji.batcher.BatcherF( test_seqs1, test_targets, test_targets_imag, batch_size=dr.batch_size, pool_width=job['target_pool']) else: batcher_test", "filter_names [str] : \"\"\" # name by number filter_names =", "vmax=param_range) ax = plt.gca() ax.set_xticklabels(range(1, param_matrix.shape[1] + 1)) ax.set_yticklabels('TGCA', rotation='horizontal')", "for j in range(4): # ic += 0.5 + pwm[i][j]*np.log2(pseudoc+pwm[i][j])", "0: motif_list.append(nts[max_n]) else: motif_list.append('N') return ''.join(motif_list) ################################################################################ # filter_possum #", "information content. 
In the original analysis, I used a bg_gc=0.5.", "'w') # print header for later panda reading header_cols =", "# if it starts in left_pad if fstart < 0:", "= '-X NO -Y NO --errorbars NO --fineprint \"\"' weblogo_opts", "nsites += 1 if len(pwm_counts) == 0: # initialize with", "between filter activations and # targets. # # Input #", "options.trim_filters) # plot weblogo of high scoring outputs plot_filter_logo( filter_outs[:,", "job['fourier']: batcher_test = basenji.batcher.BatcherF( test_seqs1, test_targets, test_targets_imag, batch_size=dr.batch_size, pool_width=job['target_pool']) else:", "data_open if job['fourier']: test_targets_imag = data_open['test_out_imag'] if options.valid: test_targets_imag =", "meme_out = open(meme_file, 'w') # print intro material print('MEME version", "Plot a clustered heatmap of correlations between filter activations and", "pd.DataFrame( filter_target_cors, index=filter_names_live, columns=target_names) sns.set(font_scale=0.3) plt.figure() sns.clustermap(cor_df, cmap='BrBG', center=0, figsize=(8,", "# if it ends in right_pad if fend > len(seqs[i]):", "fstd = plot_score_density( np.ravel(filter_outs[:, :, f]), '%s/filter%d_dens.pdf' % (options.out_dir, f))", "PWM of uninformative suffix while trim_end >= 0 and np.max(param_matrix[:,", "T %.4f' % tuple(nt_freqs), file=meme_out) print('', file=meme_out) return meme_out def", "param_matrix, cmap='PRGn', linewidths=0.2, vmin=-param_range, vmax=param_range) ax = plt.gca() ax.set_xticklabels(range(1, param_matrix.shape[1]", "Input # filter_outs ################################################################################ def plot_filter_seg_heat(filter_outs, out_pdf, whiten=True, drop_dead=True): b", "KIND, either express or implied. # See the License for", "= maxpct_t * all_outs_norm.max() + all_outs_mean left_pad = (filter_size -", "filter_seqs = np.transpose(filter_seqs) if drop_dead: filter_stds = filter_seqs.std(axis=1) filter_seqs =", "from sklearn import preprocessing import tensorflow as tf import basenji", "+ 'ng' #plt.savefig(out_png, dpi=300) plt.close() ################################################################################ # filter_motif # #", "parser.error( 'Must provide Basenji parameters and model files and test", "basenji.batcher.Batcher( test_seqs1, test_targets, batch_size=dr.batch_size, pool_width=job['target_pool']) # initialize saver saver =", "np.array(pwm_freqs), nsites - 4 def meme_add(meme_out, f, filter_pwm, nsites, trim_filters=False):", "by filter. 
filter_seqs_std = filter_outs_seq.std(axis=0) filter_outs_seq = filter_outs_seq[:, filter_seqs_std >", "+= 1 print('%d segments of length %d' % (s, l", "proportion of max) to consider for PWM [Default: %default]' )", "dest='meme_db', default='%s/data/motifs/Homo_sapiens.meme' % os.environ['BASENJIDIR'], help='MEME database used to annotate motifs')", "= filter_outs.shape[0] f = filter_outs.shape[1] l = filter_outs.shape[2] s =", "print('ALPHABET= ACGT', file=meme_out) print('', file=meme_out) print('Background letter frequencies:', file=meme_out) print('A", "np.array of the filter's parameter matrix # out_pdf: ################################################################################ def", "(the \"License\"); # you may not use this file except", "header_cols = ('', 'consensus', 'annotation', 'ic', 'mean', 'std') print('%3s %19s", "################################################################################ # __main__ ################################################################################ if __name__ == '__main__': main() #", "and make plots to explore the first convolution layer of", "def filter_possum(param_matrix, motif_id, possum_file, trim_filters=False, mult=200): # possible trim trim_start", "shutil, subprocess, time import h5py import matplotlib matplotlib.use('PDF') import matplotlib.pyplot", "= a[1] qval = float(a[5]) filter_motifs.setdefault(fi, []).append((qval, motif_id)) tt_in.close() #", "################################################################################ def filter_possum(param_matrix, motif_id, possum_file, trim_filters=False, mult=200): # possible trim", "# plot weblogo of high scoring outputs plot_filter_logo( filter_outs[:, :,", "= open('%s/table.txt' % options.out_dir, 'w') # print header for later", "parameters as a heatmap plot_filter_heat(filter_weights[f, :, :], '%s/filter%d_heat.pdf' % (options.out_dir,", "basenji.dna_io.hot1_dna(test_seqs1) ################################################################# # model parameters and placeholders job = basenji.dna_io.read_job_params(params_file)", "obtaining background freqs Returns: mem_out : open MEME file \"\"\"", "None and meme_db_file is not None: motif_protein = get_motif_proteins(meme_db_file) #", "sns.set(font_scale=2) plt.figure(figsize=(param_matrix.shape[1], 4)) sns.heatmap( param_matrix, cmap='PRGn', linewidths=0.2, vmin=-param_range, vmax=param_range) ax", "= data_open['test_in'] test_targets = data_open['test_out'] try: target_names = list(data_open['target_labels']) except", "= tf.train.Saver() with tf.Session() as sess: # load variables into", "clustered heatmap of filter activations in sequence segments. # #", "\"\"\" nts = {'A': 0, 'C': 1, 'G': 2, 'T':", ": filename of Tomtom output table. meme_db_file (str) : filename", "0 for i in range(pwm.shape[0]): for j in range(4): #", "Plot a heatmap of the filter's parameters. # # Input", "heatmap of filter activations in sequence segments. 
# # Mean", "file=possum_out) print('END', file=possum_out) possum_out.close() ################################################################################ # plot_filter_heat # # Plot", "1 print('%d segments of length %d' % (s, l /", "ends in right_pad if fend > len(seqs[i]): kmer += 'N'", "'-t', dest='trim_filters', default=False, action='store_true', help='Trim uninformative positions off the filter", "try: target_names = list(data_open['target_labels']) except KeyError: target_names = ['t%d' %", "for ti in range(test_targets.shape[1])] if options.sample is not None: #", "consensus motif consensus = filter_motif(filter_weights[f, :, :]) # grab annotation", "format file and print intro Attrs: meme_file (str) : filename", "default='%s/data/motifs/Homo_sapiens.meme' % os.environ['BASENJIDIR'], help='MEME database used to annotate motifs') parser.add_option(", "table_out.close() ################################################################# # global filter plots ################################################################# if options.plot_heats: #", "print('Background letter frequencies:', file=meme_out) print('A %.4f C %.4f G %.4f", "Tomtom output table. meme_db_file (str) : filename of MEME db", "param_matrix.shape[1] - 1 trim_t = 0.3 if trim_filters: # trim", "parser.add_option( '-d', dest='model_hdf5_file', default=None, help='Pre-computed model output as HDF5.') parser.add_option('-o',", "doesn't work well for the smaller segments for some reason,", "- trim_start), file=possum_out) for ci in range(trim_start, trim_end + 1):", "with the length for i in range(len(seq)): pwm_counts.append(np.array([1.0] * 4))", "Input # param_matrix: np.array of the filter's parameter matrix #", "implied. # See the License for the specific language governing", "dest='plot_heats', default=False, action='store_true', help= 'Plot heat maps describing filter activations", "* 4 for i in range(len(seqs)): for nt in seqs[i]:", "parameter matrix # out_pdf: ################################################################################ def plot_score_density(f_scores, out_pdf): sns.set(font_scale=1.3) plt.figure()", "normalize pwm_freqs = [] for i in range(len(pwm_counts)): pwm_freqs.append([pwm_counts[i][j] /", "return ''.join(motif_list) ################################################################################ # filter_possum # # Write a Possum-style", "0.415. \"\"\" pseudoc = 1e-9 if transpose: pwm = np.transpose(pwm)", "MEME file Attrs: meme_out : open file f (int) :", "-= 1 if ic_start < ic_end: print('MOTIF filter%d' % f,", "for writing meme_out = open(meme_file, 'w') # print intro material", "the filter's parameters. # # Input # param_matrix: np.array of", "print intro Attrs: meme_file (str) : filename seqs [str] :", "sampled indexes sample_i = sorted(random.sample(range(test_seqs1.shape[0]), options.sample)) # filter test_seqs1 =", "info_content(pwm, transpose=False, bg_gc=0.415): \"\"\" Compute PWM information content. 
In the", "if it starts in left_pad if fstart < 0: kmer", "['t%d' % ti for ti in range(test_targets.shape[1])] if options.sample is", "License at # https://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law", "np import pandas as pd from scipy.stats import spearmanr import", "seqs_i], row_cluster=True, col_cluster=True, linewidths=0, xticklabels=False, vmin=hmin, vmax=hmax) plt.savefig(out_pdf) #out_png =", "+ 1, nsites), file=meme_out) for i in range(ic_start, ic_end +", "{} tt_in = open(tomtom_file) tt_in.readline() for line in tt_in: a", "in range(len(seq)): try: pwm_counts[i][nts[seq[i]]] += 1 except KeyError: pwm_counts[i] +=", "data_open['test_in'] test_targets = data_open['test_out'] try: target_names = list(data_open['target_labels']) except KeyError:", "in range(num_filters): # collapse to a consensus motif consensus =", "print('%d segments of length %d' % (s, l / s))", "s = 5 while l / float(s) - (l /", "activations in sequence segments. # # Mean doesn't work well", "test_seqs1 = test_seqs1[sample_i] test_targets = test_targets[sample_i] # convert to letters", "Open MEME motif format file and print intro Attrs: meme_file", "filter's parameters. # # Input # param_matrix: np.array of the", "Collapse the filter parameter matrix to a single DNA motif.", "filter_outs.mean(axis=2) # whiten if whiten: filter_seqs = preprocessing.scale(filter_seqs) # transpose", "'cosine' plt.figure() sns.clustermap( filter_seqs[:, seqs_i], metric=dist, row_cluster=True, col_cluster=True, linewidths=0, xticklabels=False,", "= spearmanr(filter_outs_seq[:, fi], seq_targets[:num_seqs, ti]) filter_target_cors[fi, ti] = cor cor_df", "seqs_i], 99.9) sns.set(font_scale=0.3) if whiten: dist = 'euclidean' else: dist", "plot filter-sequence heatmap plot_filter_seq_heat(filter_outs, '%s/filter_seqs.pdf' % options.out_dir) # plot filter-segment", "dest='act_t', default=0.5, type='float', help= 'Activation threshold (as proportion of max)", "filter_names_live = filter_names[filter_seqs_std > 0] filter_target_cors = np.zeros((len(filter_names_live), num_targets)) for", "each segment into a new instance filter_seqs = np.reshape(np.swapaxes(filter_outs_mean, 2,", "# Plot a heatmap of the filter's parameters. 
# #", "for nt in seqs[i]: try: nt_counts[nts[nt]] += 1 except KeyError:", "# Plot the score density and print to the stats", "Unless required by applicable law or agreed to in writing,", "a clustered heatmap of correlations between filter activations and #", "% out_prefix, 'w') filter_count = 0 for i in range(filter_outs.shape[0]):", "- np.min( param_matrix[:, trim_start]) < trim_t: trim_start += 1 #", "nsites= %d' % (ic_end - ic_start + 1, nsites), file=meme_out)", "np.log2( bg_pwm[j]) + pwm[i][j] * np.log2(pseudoc + pwm[i][j]) return ic", "< %s.fa > %s.eps' % (weblogo_opts, out_prefix, out_prefix) subprocess.call(weblogo_cmd, shell=True)", "primary sequence kmer += seqs[i][fstart:fend] # if it ends in", "fmean, fstd) print('%-3d %19s %10s %5.2f %6.4f %6.4f' % row_cols,", "fstart = j - left_pad fend = fstart + filter_size", "plots ################################################################# if options.plot_heats: # plot filter-sequence heatmap plot_filter_seq_heat(filter_outs, '%s/filter_seqs.pdf'", "open(filter_fasta): if line[0] != '>': seq = line.rstrip() nsites +=", "= 'cosine' plt.figure() sns.clustermap( filter_seqs[:, seqs_i], metric=dist, row_cluster=True, col_cluster=True, linewidths=0,", "= get_motif_proteins(meme_db_file) # hash motifs and q-value's by filter filter_motifs", "* np.log2(pseudoc + pwm[i][j]) return ic def make_filter_pwm(filter_fasta): \"\"\" Make", "'w') filter_count = 0 for i in range(filter_outs.shape[0]): for j", "the specific language governing permissions and # limitations under the", "table of information ################################################################# table_out = open('%s/table.txt' % options.out_dir, 'w')", "of the given model using the given sequences. ''' weblogo_opts", "meme_out : open file f (int) : filter index #", "Copyright 2017 Calico LLC # Licensed under the Apache License,", "[nt_counts[i] / nt_sum for i in range(4)] # open file", "sample_i = sorted(random.sample(range(test_seqs1.shape[0]), options.sample)) # filter test_seqs1 = test_seqs1[sample_i] test_targets", "hits \"\"\" nts = {'A': 0, 'C': 1, 'G': 2,", "vmin=hmin, vmax=hmax) plt.savefig(out_pdf) #out_png = out_pdf[:-2] + 'ng' #plt.savefig(out_png, dpi=300)", "maps describing filter activations in the test sequences [Default: %default]'", "weblogo_opts = '-X NO -Y NO --errorbars NO --fineprint \"\"'", "filter_pwm, nsites = make_filter_pwm('%s/filter%d_logo.fa' % (options.out_dir, f)) if nsites <", "# Plot a clustered heatmap of correlations between filter activations", "fstd) print('%-3d %19s %10s %5.2f %6.4f %6.4f' % row_cols, file=table_out)", "parser.add_option( '-m', dest='meme_db', default='%s/data/motifs/Homo_sapiens.meme' % os.environ['BASENJIDIR'], help='MEME database used to", "original analysis, I used a bg_gc=0.5. 
For any future analysis,", "try: nt_counts[nts[nt]] += 1 except KeyError: pass # normalize nt_sum", "PWM array nsites (int) : number of filter sites \"\"\"", "PWM for the filter filter_pwm, nsites = make_filter_pwm('%s/filter%d_logo.fa' % (options.out_dir,", "job['seq_depth'] = test_seqs1.shape[2] job['num_targets'] = test_targets.shape[2] job['target_pool'] = int(np.array(data_open.get('pool_width', 1)))", "range(test_targets.shape[1])] if options.sample is not None: # choose sampled indexes", "np.min( param_matrix[:, trim_end]) < trim_t: trim_end -= 1 if trim_start", "filter_weights = np.transpose(np.squeeze(filter_weights), [2, 1, 0]) print(filter_weights.shape) # test t0", "= (f, consensus, annotation, filters_ic[f], fmean, fstd) print('%-3d %19s %10s", "# assign filter's best match for fi in filter_motifs: top_motif", "filters_ic.append(info_content(filter_pwm)) # add to the meme motif file meme_add(meme_out, f,", "shell=True) # read in annotations filter_names = name_filters( num_filters, '%s/tomtom/tomtom.txt'", "starts in left_pad if fstart < 0: kmer += 'N'", "# add to the meme motif file meme_add(meme_out, f, filter_pwm,", "of length %d' % (s, l / s)) # split", "# out_pdf: ################################################################################ def filter_motif(param_matrix): nts = 'ACGT' motif_list =", "for fi in range(num_filters)] # name by protein if tomtom_file", "governing permissions and # limitations under the License. # =========================================================================", "> 1: annotation = name_pieces[1] # plot density of filter", "parameter matrix # out_pdf: ################################################################################ def plot_filter_seq_heat(filter_outs, out_pdf, whiten=True, drop_dead=True):", "subprocess.call( 'tomtom -dist pearson -thresh 0.1 -oc %s/tomtom %s/filters_meme.txt %s'", "################################################################# table_out = open('%s/table.txt' % options.out_dir, 'w') # print header", "# mean across the segments filter_outs_mean = filter_outs_seg.max(axis=3) # break", "filter_seqs_std = filter_outs_seq.std(axis=0) filter_outs_seq = filter_outs_seq[:, filter_seqs_std > 0] filter_names_live", "+ 1 - trim_start), file=possum_out) for ci in range(trim_start, trim_end", "+ all_outs_mean left_pad = (filter_size - 1) // 2 right_pad", "it ends in right_pad if fend > len(seqs[i]): kmer +=", "% options.out_dir, 'w') # print header for later panda reading", "+= 'N' * (-fstart) fstart = 0 # add primary", "if len(name_pieces) > 1: annotation = name_pieces[1] # plot density", "time.time() layer_filter_outs, _ = dr.hidden(sess, batcher_test, layers=[0]) filter_outs = layer_filter_outs[0]", "out_pdf): sns.set(font_scale=1.3) plt.figure() sns.distplot(f_scores, kde=False) plt.xlabel('ReLU output') plt.savefig(out_pdf) plt.close() return", "PWM of uninformative suffix ic_end = filter_pwm.shape[0] - 1 while", "Plot a clustered heatmap of filter activations in # #", "trim PWM of uninformative prefix while trim_start < param_matrix.shape[1] and", "= np.reshape(filter_outs, (b, f, s, l / s)) # mean", "+= 'N' * (fend - len(seqs[i])) # output print('>%d_%d' %", "filter_stds = filter_seqs.std(axis=1) filter_seqs = filter_seqs[filter_stds > 0] # downsample", "filter_pwm.shape[0] and info_content( filter_pwm[ic_start:ic_start + 1]) < ic_t: ic_start +=", "hmin = np.percentile(filter_seqs[:, seqs_i], 0.1) hmax = np.percentile(filter_seqs[:, seqs_i], 99.9)", "0: kmer += 'N' * (-fstart) fstart = 0 #", 
"subprocess, time import h5py import matplotlib matplotlib.use('PDF') import matplotlib.pyplot as", "LLC # Licensed under the Apache License, Version 2.0 (the", "into multiple segments filter_outs_seg = np.reshape(filter_outs, (b, f, s, l", "\"#FBB116\" G G' weblogo_opts += ' -C \"#0C8040\" T T'", "(mult * n) for n in param_matrix[:, ci]]), file=possum_out) print('END',", "print intro material print('MEME version 4', file=meme_out) print('', file=meme_out) print('ALPHABET=", "= np.percentile(filter_seqs[:, seqs_i], 99.9) sns.set(font_scale=0.3) plt.figure() sns.clustermap( filter_seqs[:, seqs_i], row_cluster=True,", "if maxpct_t: all_outs = np.ravel(filter_outs) all_outs_mean = all_outs.mean() all_outs_norm =", "% options.out_dir, 'max') def get_motif_proteins(meme_db_file): \"\"\" Hash motif_id's to protein", "= np.percentile(filter_seqs[:, seqs_i], 99.9) sns.set(font_scale=0.3) if whiten: dist = 'euclidean'", "analysis, I used a bg_gc=0.5. For any future analysis, I", "[] nsites = 4 # pseudocounts for line in open(filter_fasta):", "of positive outputs filter_fasta_out = open('%s.fa' % out_prefix, 'w') filter_count", "trim PWM of uninformative prefix ic_start = 0 while ic_start", "file=meme_out) print('ALPHABET= ACGT', file=meme_out) print('', file=meme_out) print('Background letter frequencies:', file=meme_out)", "NO -Y NO --errorbars NO --fineprint \"\"' weblogo_opts += '", "if not os.path.isdir(options.out_dir): os.mkdir(options.out_dir) ################################################################# # load data data_open =", "filter-sequence heatmap plot_filter_seq_heat(filter_outs, '%s/filter_seqs.pdf' % options.out_dir) # plot filter-segment heatmap", "\"\"\" motif_protein = {} for line in open(meme_db_file): a =", "+ pwm[i][j]*np.log2(pseudoc+pwm[i][j]) ic += -bg_pwm[j] * np.log2( bg_pwm[j]) + pwm[i][j]", "filters tomtom_file (str) : filename of Tomtom output table. meme_db_file", "dest='model_hdf5_file', default=None, help='Pre-computed model output as HDF5.') parser.add_option('-o', dest='out_dir', default='.')", "\"\"\" pseudoc = 1e-9 if transpose: pwm = np.transpose(pwm) bg_pwm", "cmap='BrBG', center=0, figsize=(8, 10)) plt.savefig(out_pdf) plt.close() ################################################################################ # plot_filter_seq_heat #", "all_outs_norm.max() + all_outs_mean left_pad = (filter_size - 1) // 2", "pandas as pd from scipy.stats import spearmanr import seaborn as", "0.1) hmax = np.percentile(filter_seqs[:, seqs_i], 99.9) sns.set(font_scale=0.3) plt.figure() sns.clustermap( filter_seqs[:,", "KeyError: pass # normalize nt_sum = float(sum(nt_counts)) nt_freqs = [nt_counts[i]", "density and print to the stats table. 
# # Input", "+ 1)) ax.set_yticklabels('TGCA', rotation='horizontal') # , size=10) plt.savefig(out_pdf) plt.close() ################################################################################", "range(len(seqs)): for nt in seqs[i]: try: nt_counts[nts[nt]] += 1 except", "(int) : total number of filters tomtom_file (str) : filename", "G G' weblogo_opts += ' -C \"#0C8040\" T T' ################################################################################", "ic_end >= 0 and info_content(filter_pwm[ic_end:ic_end + 1]) < ic_t: ic_end", "to the meme motif file meme_add(meme_out, f, filter_pwm, nsites, options.trim_filters)", "99.9) sns.set(font_scale=0.3) if whiten: dist = 'euclidean' else: dist =", "out_pdf: ################################################################################ def filter_motif(param_matrix): nts = 'ACGT' motif_list = []", "model_file = args[1] data_file = args[2] if not os.path.isdir(options.out_dir): os.mkdir(options.out_dir)", "'w') # print intro material print('MEME version 4', file=meme_out) print('',", "'usage: %prog [options] <params_file> <model_file> <data_file>' parser = OptionParser(usage) parser.add_option(", "test_targets, batch_size=dr.batch_size, pool_width=job['target_pool']) # initialize saver saver = tf.train.Saver() with", "action='store_true', help= 'Plot heat maps describing filter activations in the", "else: dist = 'cosine' plt.figure() sns.clustermap( filter_seqs[:, seqs_i], metric=dist, row_cluster=True,", "// 2 right_pad = filter_size - left_pad # print fasta", "suffix while trim_end >= 0 and np.max(param_matrix[:, trim_end]) - np.min(", "tt_in = open(tomtom_file) tt_in.readline() for line in tt_in: a =", "possum_out = open(possum_file, 'w') print('BEGIN GROUP', file=possum_out) print('BEGIN FLOAT', file=possum_out)", "smaller segments for some reason, but taking # the max", "open('%s.fa' % out_prefix, 'w') filter_count = 0 for i in", "import pandas as pd from scipy.stats import spearmanr import seaborn", "(options.out_dir, f), maxpct_t=options.act_t) # make a PWM for the filter", "-dist pearson -thresh 0.1 -oc %s/tomtom %s/filters_meme.txt %s' % (options.out_dir,", "param_range = abs(param_matrix).max() sns.set(font_scale=2) plt.figure(figsize=(param_matrix.shape[1], 4)) sns.heatmap( param_matrix, cmap='PRGn', linewidths=0.2,", "(options.out_dir, f), options.trim_filters) # plot weblogo of high scoring outputs", "print fasta file of positive outputs filter_fasta_out = open('%s.fa' %", "h5py.File(data_file) test_seqs1 = data_open['test_in'] test_targets = data_open['test_out'] try: target_names =", "plot_score_density( np.ravel(filter_outs[:, :, f]), '%s/filter%d_dens.pdf' % (options.out_dir, f)) row_cols =", "plot_filter_seg_heat(filter_outs, out_pdf, whiten=True, drop_dead=True): b = filter_outs.shape[0] f = filter_outs.shape[1]", "args[1] data_file = args[2] if not os.path.isdir(options.out_dir): os.mkdir(options.out_dir) ################################################################# #", "You may obtain a copy of the License at #", "col_cluster=True, linewidths=0, xticklabels=False, vmin=hmin, vmax=hmax) plt.savefig(out_pdf) #out_png = out_pdf[:-2] +", "4) # normalize pwm_freqs = [] for i in range(len(pwm_counts)):", "break each segment into a new instance filter_seqs = np.reshape(np.swapaxes(filter_outs_mean,", "# adjust for fourier job['fourier'] = 'train_out_imag' in data_open if", "output as HDF5.') parser.add_option('-o', dest='out_dir', default='.') parser.add_option( '-m', dest='meme_db', 
default='%s/data/motifs/Homo_sapiens.meme'", "if options.plot_heats: # plot filter-sequence heatmap plot_filter_seq_heat(filter_outs, '%s/filter_seqs.pdf' % options.out_dir)", "4 w= %d nsites= %d' % (ic_end - ic_start +", "segments filter_outs_mean = filter_outs_seg.max(axis=3) # break each segment into a", "expect. # # Input # filter_outs ################################################################################ def plot_filter_seg_heat(filter_outs, out_pdf,", "dpi=300) plt.close() ################################################################################ # plot_filter_seq_heat # # Plot a clustered", "for n in range(1, 4): if param_matrix[n, v] > param_matrix[max_n,", "0: weblogo_cmd = 'weblogo %s < %s.fa > %s.eps' %", "' -C \"#FBB116\" G G' weblogo_opts += ' -C \"#0C8040\"", "# initialize with the length for i in range(len(seq)): pwm_counts.append(np.array([1.0]", "the filter's parameter matrix # out_pdf: ################################################################################ def filter_possum(param_matrix, motif_id,", "_ = dr.hidden(sess, batcher_test, layers=[0]) filter_outs = layer_filter_outs[0] print(filter_outs.shape) #", "ti]) filter_target_cors[fi, ti] = cor cor_df = pd.DataFrame( filter_target_cors, index=filter_names_live,", "b = filter_outs.shape[0] f = filter_outs.shape[1] l = filter_outs.shape[2] s", "%default]' ) (options, args) = parser.parse_args() if len(args) != 3:", "range(ic_start, ic_end + 1): print('%.4f %.4f %.4f %.4f' % tuple(filter_pwm[i]),", "plt import numpy as np import pandas as pd from", "= '.' name_pieces = filter_names[f].split('_') if len(name_pieces) > 1: annotation", "+ 1): print('%.4f %.4f %.4f %.4f' % tuple(filter_pwm[i]), file=meme_out) print('',", "import OptionParser import copy, os, pdb, random, shutil, subprocess, time", "# split into multiple segments filter_outs_seg = np.reshape(filter_outs, (b, f,", "file filter_possum(filter_weights[f, :, :], 'filter%d' % f, '%s/filter%d_possum.txt' % (options.out_dir,", "pwm_counts[i] += np.array([0.25] * 4) # normalize pwm_freqs = []", "> 0] # downsample sequences seqs_i = np.random.randint(0, filter_seqs.shape[1], 500)", "random, shutil, subprocess, time import h5py import matplotlib matplotlib.use('PDF') import", "options.out_dir, options.meme_db), shell=True) # read in annotations filter_names = name_filters(", "for i in range(ic_start, ic_end + 1): print('%.4f %.4f %.4f", "1 if trim_start < trim_end: possum_out = open(possum_file, 'w') print('BEGIN", "and # limitations under the License. # ========================================================================= from __future__", "C C' weblogo_opts += ' -C \"#FBB116\" G G' weblogo_opts", "motif_protein[a[1]] = a[2] return motif_protein def info_content(pwm, transpose=False, bg_gc=0.415): \"\"\"", "file=possum_out) possum_out.close() ################################################################################ # plot_filter_heat # # Plot a heatmap", "max_n = n if param_matrix[max_n, v] > 0: motif_list.append(nts[max_n]) else:", "a bg_gc=0.5. 
For any future analysis, I ought to switch", "drop_dead=True): # compute filter output means per sequence filter_seqs =", "= 0 # add primary sequence kmer += seqs[i][fstart:fend] #", ", size=10) plt.savefig(out_pdf) plt.close() ################################################################################ # plot_filter_logo # # Plot", "#plt.savefig(out_png, dpi=300) plt.close() ################################################################################ # filter_motif # # Collapse the", "sequences [Default: %default]' ) parser.add_option( '-s', dest='sample', default=None, type='int', help='Sample", "indexes sample_i = sorted(random.sample(range(test_seqs1.shape[0]), options.sample)) # filter test_seqs1 = test_seqs1[sample_i]", "'Must provide Basenji parameters and model files and test data", "plot weblogo of high scoring outputs plot_filter_logo( filter_outs[:, :, f],", "% (options.out_dir, f)) if nsites < 10: # no information", "[]).append((qval, motif_id)) tt_in.close() # assign filter's best match for fi", "return f_scores.mean(), f_scores.std() ################################################################################ # __main__ ################################################################################ if __name__ ==", "+ pwm[i][j]) return ic def make_filter_pwm(filter_fasta): \"\"\" Make a PWM", "qval = float(a[5]) filter_motifs.setdefault(fi, []).append((qval, motif_id)) tt_in.close() # assign filter's", "matrix # out_pdf: ################################################################################ def plot_filter_heat(param_matrix, out_pdf): param_range = abs(param_matrix).max()", "range(filter_outs.shape[0]): for j in range(filter_outs.shape[1]): if filter_outs[i, j] > raw_t:", "License. # You may obtain a copy of the License", "for this filter from its top hits \"\"\" nts =", "# trim PWM of uninformative prefix ic_start = 0 while", "< ic_t: ic_end -= 1 if ic_start < ic_end: print('MOTIF", "meme_out = meme_intro('%s/filters_meme.txt' % options.out_dir, test_seqs) for f in range(num_filters):", "pd from scipy.stats import spearmanr import seaborn as sns from", "1 # trim PWM of uninformative suffix while trim_end >=", "%s.fa > %s.eps' % (weblogo_opts, out_prefix, out_prefix) subprocess.call(weblogo_cmd, shell=True) ################################################################################", "from the test set [Default:%default]') parser.add_option( '-t', dest='trim_filters', default=False, action='store_true',", "filter's parameter matrix # out_pdf: ################################################################################ def plot_filter_logo(filter_outs, filter_size, seqs,", "heatmap plot_filter_heat(filter_weights[f, :, :], '%s/filter%d_heat.pdf' % (options.out_dir, f)) # write", "(options.out_dir, options.out_dir, options.meme_db), shell=True) # read in annotations filter_names =", "\"\"\" # name by number filter_names = ['f%d' % fi", "activations in the test sequences [Default: %default]' ) parser.add_option( '-s',", "filter_outs.max(axis=2) # std is sequence by filter. 
filter_seqs_std = filter_outs_seq.std(axis=0)", "parser.add_option( '-t', dest='trim_filters', default=False, action='store_true', help='Trim uninformative positions off the", "target_names = ['t%d' % ti for ti in range(test_targets.shape[1])] if", "file=meme_out) return meme_out def name_filters(num_filters, tomtom_file, meme_db_file): \"\"\" Name the", "data data_open = h5py.File(data_file) test_seqs1 = data_open['test_in'] test_targets = data_open['test_out']", "job['target_pool'] = int(np.array(data_open.get('pool_width', 1))) t0 = time.time() dr = basenji.seqnn.SeqNN()", "# normalize nt_sum = float(sum(nt_counts)) nt_freqs = [nt_counts[i] / nt_sum", "if job['fourier']: batcher_test = basenji.batcher.BatcherF( test_seqs1, test_targets, test_targets_imag, batch_size=dr.batch_size, pool_width=job['target_pool'])", "ic def make_filter_pwm(filter_fasta): \"\"\" Make a PWM for this filter", "filter output means per sequence filter_seqs = filter_outs.mean(axis=2) # whiten", "ic_end -= 1 if ic_start < ic_end: print('MOTIF filter%d' %", "parameters. # # Input # param_matrix: np.array of the filter's", "of uninformative suffix while trim_end >= 0 and np.max(param_matrix[:, trim_end])", "################################################################################ # plot_filter_logo # # Plot a weblogo of the", "%d' % (trim_end + 1 - trim_start), file=possum_out) for ci", "make_filter_pwm(filter_fasta): \"\"\" Make a PWM for this filter from its", "filter_outs[i, j] > raw_t: # construct kmer kmer = ''", "'%s/filter%d_logo' % (options.out_dir, f), maxpct_t=options.act_t) # make a PWM for", "# compute filter output means per sequence filter_seqs = filter_outs.mean(axis=2)", "tf import basenji ''' basenji_motifs.py Collect statistics and make plots", "= all_outs - all_outs_mean raw_t = maxpct_t * all_outs_norm.max() +", "plt.figure(figsize=(param_matrix.shape[1], 4)) sns.heatmap( param_matrix, cmap='PRGn', linewidths=0.2, vmin=-param_range, vmax=param_range) ax =", "'max') def get_motif_proteins(meme_db_file): \"\"\" Hash motif_id's to protein names using", "usage = 'usage: %prog [options] <params_file> <model_file> <data_file>' parser =", "kmer = '' # determine boundaries, considering padding fstart =", "in range(filter_outs.shape[1]): if filter_outs[i, j] > raw_t: # construct kmer", "% fi for fi in range(num_filters)] # name by protein", "!= '>': seq = line.rstrip() nsites += 1 if len(pwm_counts)", "cluster quite as well as you # might expect. #", "filter_count = 0 for i in range(filter_outs.shape[0]): for j in", "1 while ic_end >= 0 and info_content(filter_pwm[ic_end:ic_end + 1]) <", "model_file) # get weights filter_weights = sess.run(dr.filter_weights[0]) filter_weights = np.transpose(np.squeeze(filter_weights),", "reading header_cols = ('', 'consensus', 'annotation', 'ic', 'mean', 'std') print('%3s", "range(4)] # open file for writing meme_out = open(meme_file, 'w')", "plt.figure() sns.clustermap( filter_seqs[:, seqs_i], row_cluster=True, col_cluster=True, linewidths=0, xticklabels=False, vmin=hmin, vmax=hmax)", "cmap='PRGn', linewidths=0.2, vmin=-param_range, vmax=param_range) ax = plt.gca() ax.set_xticklabels(range(1, param_matrix.shape[1] +", "and meme_db_file is not None: motif_protein = get_motif_proteins(meme_db_file) # hash", "MEME file \"\"\" nts = {'A': 0, 'C': 1, 'G':", "################################################################# # annotate filters ################################################################# # run tomtom subprocess.call( 'tomtom", "table. 
meme_db_file (str) : filename of MEME db Returns: filter_names", "Compute PWM information content. In the original analysis, I used", "in range(len(seqs)): for nt in seqs[i]: try: nt_counts[nts[nt]] += 1", "filter_pwm (array) : filter PWM array nsites (int) : number", "ax.set_xticklabels(range(1, param_matrix.shape[1] + 1)) ax.set_yticklabels('TGCA', rotation='horizontal') # , size=10) plt.savefig(out_pdf)", "float(sum(nt_counts)) nt_freqs = [nt_counts[i] / nt_sum for i in range(4)]", "def meme_intro(meme_file, seqs): \"\"\" Open MEME motif format file and", "save information content filters_ic.append(info_content(filter_pwm)) # add to the meme motif", "out_pdf[:-2] + 'ng' #plt.savefig(out_png, dpi=300) plt.close() ################################################################################ # plot_filter_seq_heat #", "and np.max(param_matrix[:, trim_end]) - np.min( param_matrix[:, trim_end]) < trim_t: trim_end", "# get weights filter_weights = sess.run(dr.filter_weights[0]) filter_weights = np.transpose(np.squeeze(filter_weights), [2,", "['f%d' % fi for fi in range(num_filters)] # name by", "meme_db_file is not None: motif_protein = get_motif_proteins(meme_db_file) # hash motifs", "+= seqs[i][fstart:fend] # if it ends in right_pad if fend", "placeholders job = basenji.dna_io.read_job_params(params_file) job['seq_length'] = test_seqs1.shape[1] job['seq_depth'] = test_seqs1.shape[2]", "time %ds' % (time.time() - t0)) # adjust for fourier", "tomtom_file (str) : filename of Tomtom output table. meme_db_file (str)", "trim_start += 1 # trim PWM of uninformative suffix while", "# annotate filters ################################################################# # run tomtom subprocess.call( 'tomtom -dist", "num_filters, '%s/tomtom/tomtom.txt' % options.out_dir, options.meme_db) ################################################################# # print a table", "intro material print('MEME version 4', file=meme_out) print('', file=meme_out) print('ALPHABET= ACGT',", "write possum motif file filter_possum(filter_weights[f, :, :], 'filter%d' % f,", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "= 0 while ic_start < filter_pwm.shape[0] and info_content( filter_pwm[ic_start:ic_start +", "filter_outs ################################################################################ def plot_filter_seg_heat(filter_outs, out_pdf, whiten=True, drop_dead=True): b = filter_outs.shape[0]", "0.1) hmax = np.percentile(filter_seqs[:, seqs_i], 99.9) sns.set(font_scale=0.3) if whiten: dist", "for the specific language governing permissions and # limitations under", "file=meme_out) print( 'letter-probability matrix: alength= 4 w= %d nsites= %d'", "file=meme_out) print('Background letter frequencies:', file=meme_out) print('A %.4f C %.4f G", "% motif_protein[top_motif] return np.array(filter_names) ################################################################################ # plot_target_corr # # Plot", "ic_t: ic_start += 1 # trim PWM of uninformative suffix", "protein names using the MEME DB file \"\"\" motif_protein =", "batch_size=dr.batch_size, pool_width=job['target_pool']) else: batcher_test = basenji.batcher.Batcher( test_seqs1, test_targets, batch_size=dr.batch_size, pool_width=job['target_pool'])", "np.array(filter_names) ################################################################################ # plot_target_corr # # Plot a clustered heatmap", "[1] * 4 for i in range(len(seqs)): for nt in", "'C': 1, 'G': 2, 'T': 3} pwm_counts = [] nsites", "fi = int(a[0][6:]) motif_id = 
a[1] qval = float(a[5]) filter_motifs.setdefault(fi,", "language governing permissions and # limitations under the License. #", "for ti in range(num_targets): cor, p = spearmanr(filter_outs_seq[:, fi], seq_targets[:num_seqs,", "required by applicable law or agreed to in writing, software", "ACGT', file=meme_out) print('', file=meme_out) print('Background letter frequencies:', file=meme_out) print('A %.4f", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "filter_names: # target_names: # out_pdf: ################################################################################ def plot_target_corr(filter_outs, seq_targets, filter_names,", ":, :]) # grab annotation annotation = '.' name_pieces =", "for v in range(param_matrix.shape[1]): max_n = 0 for n in", "import print_function from optparse import OptionParser import copy, os, pdb,", "% f) # plot filter parameters as a heatmap plot_filter_heat(filter_weights[f,", "target_names, out_pdf, seq_op='mean'): num_seqs = filter_outs.shape[0] num_targets = len(target_names) if", "/ s)) # split into multiple segments filter_outs_seg = np.reshape(filter_outs,", "1 - trim_start), file=possum_out) for ci in range(trim_start, trim_end +", "%19s %10s %5s %6s %6s' % header_cols, file=table_out) for f", "its top hits \"\"\" nts = {'A': 0, 'C': 1,", "abs(param_matrix).max() sns.set(font_scale=2) plt.figure(figsize=(param_matrix.shape[1], 4)) sns.heatmap( param_matrix, cmap='PRGn', linewidths=0.2, vmin=-param_range, vmax=param_range)", "def plot_score_density(f_scores, out_pdf): sns.set(font_scale=1.3) plt.figure() sns.distplot(f_scores, kde=False) plt.xlabel('ReLU output') plt.savefig(out_pdf)", "+= 1 if len(pwm_counts) == 0: # initialize with the", "- left_pad # print fasta file of positive outputs filter_fasta_out", "agreed to in writing, software # distributed under the License", ":, f], filter_size, test_seqs, '%s/filter%d_logo' % (options.out_dir, f), maxpct_t=options.act_t) #", "useful variables num_filters = filter_weights.shape[0] filter_size = filter_weights.shape[2] ################################################################# #", "= np.zeros((len(filter_names_live), num_targets)) for fi in range(len(filter_names_live)): for ti in", "distributed under the License is distributed on an \"AS IS\"", "trim_filters=False): \"\"\" Print a filter to the growing MEME file", "options.meme_db), shell=True) # read in annotations filter_names = name_filters( num_filters,", "f (int) : filter index # filter_pwm (array) : filter", "of the filter's parameter matrix # out_pdf: ################################################################################ def filter_possum(param_matrix,", "print to the stats table. # # Input # param_matrix:", "value of 0.415. 
\"\"\" pseudoc = 1e-9 if transpose: pwm", "ti for ti in range(test_targets.shape[1])] if options.sample is not None:", "is not None: # choose sampled indexes sample_i = sorted(random.sample(range(test_seqs1.shape[0]),", "1: annotation = name_pieces[1] # plot density of filter output", "= open('%s.fa' % out_prefix, 'w') filter_count = 0 for i", "%10s %5s %6s %6s' % header_cols, file=table_out) for f in", "uninformative suffix ic_end = filter_pwm.shape[0] - 1 while ic_end >=", "= meme_intro('%s/filters_meme.txt' % options.out_dir, test_seqs) for f in range(num_filters): print('Filter", "ic_start + 1, nsites), file=meme_out) for i in range(ic_start, ic_end", "= plot_score_density( np.ravel(filter_outs[:, :, f]), '%s/filter%d_dens.pdf' % (options.out_dir, f)) row_cols", "def plot_filter_seq_heat(filter_outs, out_pdf, whiten=True, drop_dead=True): # compute filter output means", "default='.') parser.add_option( '-m', dest='meme_db', default='%s/data/motifs/Homo_sapiens.meme' % os.environ['BASENJIDIR'], help='MEME database used", "h5py import matplotlib matplotlib.use('PDF') import matplotlib.pyplot as plt import numpy", "< param_matrix.shape[1] and np.max( param_matrix[:, trim_start]) - np.min( param_matrix[:, trim_start])", "# name by number filter_names = ['f%d' % fi for", "range(num_filters): # collapse to a consensus motif consensus = filter_motif(filter_weights[f,", "if seq_op == 'mean': filter_outs_seq = filter_outs.mean(axis=2) else: filter_outs_seq =", "correlation heatmap plot_target_corr(filter_outs, seq_targets, filter_names, target_names, '%s/filter_target_cors_mean.pdf' % options.out_dir, 'mean')", "bg_gc] ic = 0 for i in range(pwm.shape[0]): for j", "while l / float(s) - (l / s) > 0:", "file=possum_out) for ci in range(trim_start, trim_end + 1): print( 'MA", "plt.close() return f_scores.mean(), f_scores.std() ################################################################################ # __main__ ################################################################################ if __name__", "% (time.time() - t0)) # adjust for fourier job['fourier'] =", "= data_open['test_out'] try: target_names = list(data_open['target_labels']) except KeyError: target_names =", "data_open = h5py.File(data_file) test_seqs1 = data_open['test_in'] test_targets = data_open['test_out'] try:", "# predict # initialize batcher if job['fourier']: batcher_test = basenji.batcher.BatcherF(", "np.random.randint(0, filter_seqs.shape[1], 500) hmin = np.percentile(filter_seqs[:, seqs_i], 0.1) hmax =", "variables into session saver.restore(sess, model_file) # get weights filter_weights =", "seaborn as sns from sklearn import preprocessing import tensorflow as", "to switch to the true hg19 value of 0.415. 
\"\"\"", "0 and a[0] == 'MOTIF': if a[2][0] == '(': motif_protein[a[1]]", "the segments filter_outs_mean = filter_outs_seg.max(axis=3) # break each segment into", "if len(pwm_counts) == 0: # initialize with the length for", "+ 1]) < ic_t: ic_end -= 1 if ic_start <", "pwm_counts = [] nsites = 4 # pseudocounts for line", "number of filters tomtom_file (str) : filename of Tomtom output", "of filter activations in # # Input # param_matrix: np.array", "seqs [str] : list of strings for obtaining background freqs", "fasta file of positive outputs filter_fasta_out = open('%s.fa' % out_prefix,", "0] filter_names_live = filter_names[filter_seqs_std > 0] filter_target_cors = np.zeros((len(filter_names_live), num_targets))", "sns.set(font_scale=0.3) if whiten: dist = 'euclidean' else: dist = 'cosine'", "plot_filter_logo(filter_outs, filter_size, seqs, out_prefix, raw_t=0, maxpct_t=None): if maxpct_t: all_outs =", "motif_protein[a[1]] = a[2][1:a[2].find(')')] else: motif_protein[a[1]] = a[2] return motif_protein def", "in range(len(filter_names_live)): for ti in range(num_targets): cor, p = spearmanr(filter_outs_seq[:,", "compute and save information content filters_ic.append(info_content(filter_pwm)) # add to the", "# hash motifs and q-value's by filter filter_motifs = {}", "some reason, but taking # the max looks OK. Still,", "considering padding fstart = j - left_pad fend = fstart", "'mean': filter_outs_seq = filter_outs.mean(axis=2) else: filter_outs_seq = filter_outs.max(axis=2) # std", "matrix # out_pdf: ################################################################################ def plot_filter_logo(filter_outs, filter_size, seqs, out_prefix, raw_t=0,", "'ng' #plt.savefig(out_png, dpi=300) plt.close() ################################################################################ # plot_filter_seq_heat # # Plot", "parameter matrix # out_pdf: ################################################################################ def plot_filter_logo(filter_outs, filter_size, seqs, out_prefix,", "of correlations between filter activations and # targets. # #", "filename of Tomtom output table. meme_db_file (str) : filename of", "OR CONDITIONS OF ANY KIND, either express or implied. #", "the License is distributed on an \"AS IS\" BASIS, #", "0 while ic_start < filter_pwm.shape[0] and info_content( filter_pwm[ic_start:ic_start + 1])", "import preprocessing import tensorflow as tf import basenji ''' basenji_motifs.py", "- len(seqs[i])) # output print('>%d_%d' % (i, j), file=filter_fasta_out) print(kmer,", "# pseudocounts for line in open(filter_fasta): if line[0] != '>':", "parser = OptionParser(usage) parser.add_option( '-a', dest='act_t', default=0.5, type='float', help= 'Activation", "'(': motif_protein[a[1]] = a[2][1:a[2].find(')')] else: motif_protein[a[1]] = a[2] return motif_protein", "filter_pwm[ic_start:ic_start + 1]) < ic_t: ic_start += 1 # trim", "-C \"#34459C\" C C' weblogo_opts += ' -C \"#FBB116\" G", "individual filter plots ################################################################# # also save information contents filters_ic", "seqs, out_prefix, raw_t=0, maxpct_t=None): if maxpct_t: all_outs = np.ravel(filter_outs) all_outs_mean", "in param_matrix[:, ci]]), file=possum_out) print('END', file=possum_out) print('END', file=possum_out) possum_out.close() ################################################################################", "suffix ic_end = filter_pwm.shape[0] - 1 while ic_end >= 0", "activations and # targets. 
# # Input # filter_outs: #", "plt.xlabel('ReLU output') plt.savefig(out_pdf) plt.close() return f_scores.mean(), f_scores.std() ################################################################################ # __main__", "NO --errorbars NO --fineprint \"\"' weblogo_opts += ' -C \"#CB2026\"", "1) // 2 right_pad = filter_size - left_pad # print", "''.join(motif_list) ################################################################################ # filter_possum # # Write a Possum-style motif", "output scores fmean, fstd = plot_score_density( np.ravel(filter_outs[:, :, f]), '%s/filter%d_dens.pdf'", "law or agreed to in writing, software # distributed under", "= 'euclidean' else: dist = 'cosine' plt.figure() sns.clustermap( filter_seqs[:, seqs_i],", "= param_matrix.shape[1] - 1 trim_t = 0.3 if trim_filters: #", "in left_pad if fstart < 0: kmer += 'N' *", "the first convolution layer of the given model using the", "meme_out def name_filters(num_filters, tomtom_file, meme_db_file): \"\"\" Name the filters using", "t0 = time.time() dr = basenji.seqnn.SeqNN() dr.build(job) print('Model building time", "range(len(filter_names_live)): for ti in range(num_targets): cor, p = spearmanr(filter_outs_seq[:, fi],", "linewidths=0, xticklabels=False, vmin=hmin, vmax=hmax) plt.savefig(out_pdf) #out_png = out_pdf[:-2] + 'ng'", "information ################################################################# table_out = open('%s/table.txt' % options.out_dir, 'w') # print", "in range(pwm.shape[0]): for j in range(4): # ic += 0.5", "l = filter_outs.shape[2] s = 5 while l / float(s)", "################################################################################ def plot_score_density(f_scores, out_pdf): sns.set(font_scale=1.3) plt.figure() sns.distplot(f_scores, kde=False) plt.xlabel('ReLU output')", "if filter_outs[i, j] > raw_t: # construct kmer kmer =", "- bg_gc, bg_gc, bg_gc, 1 - bg_gc] ic = 0", "a = line.split() fi = int(a[0][6:]) motif_id = a[1] qval", "Name the filters using Tomtom matches. Attrs: num_filters (int) :", "= time.time() layer_filter_outs, _ = dr.hidden(sess, batcher_test, layers=[0]) filter_outs =", "sequences. ''' weblogo_opts = '-X NO -Y NO --errorbars NO", "'filter%d' % f, '%s/filter%d_possum.txt' % (options.out_dir, f), options.trim_filters) # plot", "plt.figure() sns.distplot(f_scores, kde=False) plt.xlabel('ReLU output') plt.savefig(out_pdf) plt.close() return f_scores.mean(), f_scores.std()", "name_pieces[1] # plot density of filter output scores fmean, fstd", "len(args) != 3: parser.error( 'Must provide Basenji parameters and model", "of the filter's parameter matrix # out_pdf: ################################################################################ def filter_motif(param_matrix):", "limitations under the License. 
# ========================================================================= from __future__ import print_function", "< 0: kmer += 'N' * (-fstart) fstart = 0", "plot_filter_heat(param_matrix, out_pdf): param_range = abs(param_matrix).max() sns.set(font_scale=2) plt.figure(figsize=(param_matrix.shape[1], 4)) sns.heatmap( param_matrix,", "ic_start = 0 while ic_start < filter_pwm.shape[0] and info_content( filter_pwm[ic_start:ic_start", "initialize saver saver = tf.train.Saver() with tf.Session() as sess: #", "filter ends [Default: %default]' ) (options, args) = parser.parse_args() if", "0.3 if trim_filters: # trim PWM of uninformative prefix while", "print('%3s %19s %10s %5s %6s %6s' % header_cols, file=table_out) for", "also save information contents filters_ic = [] meme_out = meme_intro('%s/filters_meme.txt'", "name_pieces = filter_names[f].split('_') if len(name_pieces) > 1: annotation = name_pieces[1]", "dpi=300) plt.close() ################################################################################ # filter_motif # # Collapse the filter", "<data_file>' parser = OptionParser(usage) parser.add_option( '-a', dest='act_t', default=0.5, type='float', help=", "matrix # out_pdf: ################################################################################ def filter_motif(param_matrix): nts = 'ACGT' motif_list", "1, 'G': 2, 'T': 3} # count nt_counts = [1]", "'-d', dest='model_hdf5_file', default=None, help='Pre-computed model output as HDF5.') parser.add_option('-o', dest='out_dir',", "% options.out_dir) plot_filter_seg_heat( filter_outs, '%s/filter_segs_raw.pdf' % options.out_dir, whiten=False) # plot", "plot_target_corr(filter_outs, seq_targets, filter_names, target_names, '%s/filter_target_cors_max.pdf' % options.out_dir, 'max') def get_motif_proteins(meme_db_file):", "optparse import OptionParser import copy, os, pdb, random, shutil, subprocess,", "output table. meme_db_file (str) : filename of MEME db Returns:", "may not use this file except in compliance with the", "# initialize saver saver = tf.train.Saver() with tf.Session() as sess:", "len(seqs[i])) # output print('>%d_%d' % (i, j), file=filter_fasta_out) print(kmer, file=filter_fasta_out)", "normalize nt_sum = float(sum(nt_counts)) nt_freqs = [nt_counts[i] / nt_sum for", "this file except in compliance with the License. # You", "= a[2] return motif_protein def info_content(pwm, transpose=False, bg_gc=0.415): \"\"\" Compute", "table. 
# # Input # param_matrix: np.array of the filter's", "'mean') plot_target_corr(filter_outs, seq_targets, filter_names, target_names, '%s/filter_target_cors_max.pdf' % options.out_dir, 'max') def", "parameter matrix # out_pdf: ################################################################################ def filter_possum(param_matrix, motif_id, possum_file, trim_filters=False,", "import tensorflow as tf import basenji ''' basenji_motifs.py Collect statistics", "ci in range(trim_start, trim_end + 1): print( 'MA %s' %", "filter's occurrences # # Input # param_matrix: np.array of the", "l / s)) # mean across the segments filter_outs_mean =", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "v] > 0: motif_list.append(nts[max_n]) else: motif_list.append('N') return ''.join(motif_list) ################################################################################ #", "ci]]), file=possum_out) print('END', file=possum_out) print('END', file=possum_out) possum_out.close() ################################################################################ # plot_filter_heat", "ic = 0 for i in range(pwm.shape[0]): for j in", "nt_freqs = [nt_counts[i] / nt_sum for i in range(4)] #", "describing filter activations in the test sequences [Default: %default]' )", "options.out_dir, 'w') # print header for later panda reading header_cols", "* all_outs_norm.max() + all_outs_mean left_pad = (filter_size - 1) //", "j in range(filter_outs.shape[1]): if filter_outs[i, j] > raw_t: # construct", "main ################################################################################ def main(): usage = 'usage: %prog [options] <params_file>", "saver.restore(sess, model_file) # get weights filter_weights = sess.run(dr.filter_weights[0]) filter_weights =", "# plot_target_corr # # Plot a clustered heatmap of correlations", "% tuple(filter_pwm[i]), file=meme_out) print('', file=meme_out) def meme_intro(meme_file, seqs): \"\"\" Open", "# test t0 = time.time() layer_filter_outs, _ = dr.hidden(sess, batcher_test,", "# count for i in range(len(seq)): try: pwm_counts[i][nts[seq[i]]] += 1", "match for fi in filter_motifs: top_motif = sorted(filter_motifs[fi])[0][1] filter_names[fi] +=", "# print fasta file of positive outputs filter_fasta_out = open('%s.fa'", "# plot filter-sequence heatmap plot_filter_seq_heat(filter_outs, '%s/filter_seqs.pdf' % options.out_dir) # plot", "0 for n in range(1, 4): if param_matrix[n, v] >", "matrix # out_pdf: ################################################################################ def filter_possum(param_matrix, motif_id, possum_file, trim_filters=False, mult=200):", "################################################################# # also save information contents filters_ic = [] meme_out", "sequence filter_seqs = filter_outs.mean(axis=2) # whiten if whiten: filter_seqs =", "np.transpose(np.squeeze(filter_weights), [2, 1, 0]) print(filter_weights.shape) # test t0 = time.time()", "nt in seqs[i]: try: nt_counts[nts[nt]] += 1 except KeyError: pass", "filter_outs, '%s/filter_segs_raw.pdf' % options.out_dir, whiten=False) # plot filter-target correlation heatmap", "- 1 while ic_end >= 0 and info_content(filter_pwm[ic_end:ic_end + 1])", "= abs(param_matrix).max() sns.set(font_scale=2) plt.figure(figsize=(param_matrix.shape[1], 4)) sns.heatmap( param_matrix, cmap='PRGn', linewidths=0.2, vmin=-param_range,", "'%s/filter%d_possum.txt' % (options.out_dir, f), options.trim_filters) # plot weblogo of high", "/ s)) # mean across the segments filter_outs_mean = 
################################################################################
# main
################################################################################
def main():
  usage = 'usage: %prog [options] <params_file> <model_file> <data_file>'
  parser = OptionParser(usage)
  parser.add_option('-a', dest='act_t', default=0.5, type='float',
      help='Activation threshold (as proportion of max) to consider for PWM [Default: %default]')
  parser.add_option('-d', dest='model_hdf5_file', default=None,
      help='Pre-computed model output as HDF5.')
  parser.add_option('-o', dest='out_dir', default='.')
  parser.add_option('-m', dest='meme_db',
      default='%s/data/motifs/Homo_sapiens.meme' % os.environ['BASENJIDIR'],
      help='MEME database used to annotate motifs')
  parser.add_option('-p', dest='plot_heats', default=False, action='store_true',
      help='Plot heat maps describing filter activations in the test sequences [Default: %default]')
  parser.add_option('-s', dest='sample', default=None, type='int',
      help='Sample sequences from the test set [Default:%default]')
  parser.add_option('-t', dest='trim_filters', default=False, action='store_true',
      help='Trim uninformative positions off the filter ends [Default: %default]')
  # the Fourier branch below reads options.valid
  parser.add_option('-v', dest='valid', default=False, action='store_true',
      help='Process the validation set [Default: %default]')
  (options, args) = parser.parse_args()

  if len(args) != 3:
    parser.error('Must provide Basenji parameters and model files and test data in HDF5 format.')
  else:
    params_file = args[0]
    model_file = args[1]
    data_file = args[2]

  if not os.path.isdir(options.out_dir):
    os.mkdir(options.out_dir)
  #################################################################
  # load data

  data_open = h5py.File(data_file)
  test_seqs1 = data_open['test_in']
  test_targets = data_open['test_out']
  try:
    target_names = list(data_open['target_labels'])
  except KeyError:
    target_names = ['t%d' % ti for ti in range(test_targets.shape[1])]

  if options.sample is not None:
    # choose sampled indexes
    sample_i = sorted(random.sample(range(test_seqs1.shape[0]), options.sample))

    # filter
    test_seqs1 = test_seqs1[sample_i]
    test_targets = test_targets[sample_i]

  # convert to letters
  test_seqs = basenji.dna_io.hot1_dna(test_seqs1)
  #################################################################
  # model parameters and placeholders

  job = basenji.dna_io.read_job_params(params_file)

  job['seq_length'] = test_seqs1.shape[1]
  job['seq_depth'] = test_seqs1.shape[2]
  job['num_targets'] = test_targets.shape[2]
  job['target_pool'] = int(np.array(data_open.get('pool_width', 1)))

  t0 = time.time()
  dr = basenji.seqnn.SeqNN()
  dr.build(job)
  print('Model building time %ds' % (time.time() - t0))

  # determine fourier
  job['fourier'] = 'train_out_imag' in data_open
  if job['fourier']:
    test_targets_imag = data_open['test_out_imag']
    if options.valid:
      test_targets_imag = data_open['valid_out_imag']
  #################################################################
  # predict

  # initialize batcher
  if job['fourier']:
    batcher_test = basenji.batcher.BatcherF(
        test_seqs1, test_targets, test_targets_imag,
        batch_size=dr.batch_size, pool_width=job['target_pool'])
  else:
    batcher_test = basenji.batcher.Batcher(
        test_seqs1, test_targets,
        batch_size=dr.batch_size, pool_width=job['target_pool'])

  # initialize saver
  saver = tf.train.Saver()

  with tf.Session() as sess:
    # load variables into session
    saver.restore(sess, model_file)

    # get weights
    filter_weights = sess.run(dr.filter_weights[0])
    filter_weights = np.transpose(np.squeeze(filter_weights), [2, 1, 0])
    print(filter_weights.shape)

    # test
    t0 = time.time()
    layer_filter_outs, _ = dr.hidden(sess, batcher_test, layers=[0])
    filter_outs = layer_filter_outs[0]
    print(filter_outs.shape)

  # store useful variables
  num_filters = filter_weights.shape[0]
  filter_size = filter_weights.shape[2]
  #################################################################
  # individual filter plots
  #################################################################
  # also save information contents
  filters_ic = []
  meme_out = meme_intro('%s/filters_meme.txt' % options.out_dir, test_seqs)

  for f in range(num_filters):
    print('Filter %d' % f)

    # plot filter parameters as a heatmap
    plot_filter_heat(filter_weights[f, :, :],
                     '%s/filter%d_heat.pdf' % (options.out_dir, f))

    # write possum motif file
    filter_possum(filter_weights[f, :, :], 'filter%d' % f,
                  '%s/filter%d_possum.txt' % (options.out_dir, f),
                  options.trim_filters)

    # plot weblogo of high scoring outputs
    plot_filter_logo(filter_outs[:, :, f], filter_size, test_seqs,
                     '%s/filter%d_logo' % (options.out_dir, f),
                     maxpct_t=options.act_t)

    # make a PWM for the filter
    filter_pwm, nsites = make_filter_pwm('%s/filter%d_logo.fa' % (options.out_dir, f))

    if nsites < 10:
      # no information
      filters_ic.append(0)
    else:
      # compute and save information content
      filters_ic.append(info_content(filter_pwm))

      # add to the meme motif file
      meme_add(meme_out, f, filter_pwm, nsites, options.trim_filters)

  meme_out.close()
  #################################################################
  # annotate filters
  #################################################################
  # run tomtom
  subprocess.call(
      'tomtom -dist pearson -thresh 0.1 -oc %s/tomtom %s/filters_meme.txt %s' %
      (options.out_dir, options.out_dir, options.meme_db), shell=True)

  # read in annotations
  filter_names = name_filters(
      num_filters, '%s/tomtom/tomtom.txt' % options.out_dir, options.meme_db)

  #################################################################
  # print a table of information
  #################################################################
  table_out = open('%s/table.txt' % options.out_dir, 'w')

  # print header for later panda reading
  header_cols = ('', 'consensus', 'annotation', 'ic', 'mean', 'std')
  print('%3s %19s %10s %5s %6s %6s' % header_cols, file=table_out)

  for f in range(num_filters):
    # collapse to a consensus motif
    consensus = filter_motif(filter_weights[f, :, :])

    # grab annotation
    annotation = '.'
    name_pieces = filter_names[f].split('_')
    if len(name_pieces) > 1:
      annotation = name_pieces[1]

    # plot density of filter output scores
    fmean, fstd = plot_score_density(
        np.ravel(filter_outs[:, :, f]),
        '%s/filter%d_dens.pdf' % (options.out_dir, f))

    row_cols = (f, consensus, annotation, filters_ic[f], fmean, fstd)
    print('%-3d %19s %10s %5.2f %6.4f %6.4f' % row_cols, file=table_out)

  table_out.close()

  #################################################################
  # global filter plots
  #################################################################
  if options.plot_heats:
    # plot filter-sequence heatmap
    plot_filter_seq_heat(filter_outs, '%s/filter_seqs.pdf' % options.out_dir)

    # plot filter-segment heatmap
    plot_filter_seg_heat(filter_outs, '%s/filter_segs.pdf' % options.out_dir)
    plot_filter_seg_heat(filter_outs, '%s/filter_segs_raw.pdf' % options.out_dir,
                         whiten=False)

    # plot filter-target correlation heatmap
    # (seq_targets: per-sequence target means; assumed aggregation across
    # the pooled target axis)
    seq_targets = np.array(test_targets).mean(axis=1)
    plot_target_corr(filter_outs, seq_targets, filter_names, target_names,
                     '%s/filter_target_cors_mean.pdf' % options.out_dir, 'mean')
    plot_target_corr(filter_outs, seq_targets, filter_names, target_names,
                     '%s/filter_target_cors_max.pdf' % options.out_dir, 'max')
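
# A minimal sketch (not part of the original script) for reloading the
# whitespace-delimited table.txt written by main(); the column names mirror
# header_cols above.
def _read_filter_table(table_file):
  """Reload table.txt as a DataFrame indexed by filter number."""
  return pd.read_csv(
      table_file, delim_whitespace=True, skiprows=1, index_col='filter',
      names=['filter', 'consensus', 'annotation', 'ic', 'mean', 'std'])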


def get_motif_proteins(meme_db_file):
  """ Hash motif_id's to protein names using the MEME DB file """
  motif_protein = {}
  for line in open(meme_db_file):
    a = line.split()
    if len(a) > 0 and a[0] == 'MOTIF':
      if a[2][0] == '(':
        motif_protein[a[1]] = a[2][1:a[2].find(')')]
      else:
        motif_protein[a[1]] = a[2]
  return motif_protein
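
# A small sketch (not in the original script) of the two MEME 'MOTIF' line
# styles get_motif_proteins() parses; the IDs and names are hypothetical.
def _motif_line_demo():
  a = 'MOTIF MA0139.1 CTCF'.split()
  assert a[2][0] != '('  # plain style -> protein name 'CTCF'
  b = 'MOTIF M00001 (GATA1)'.split()
  assert b[2][1:b[2].find(')')] == 'GATA1'  # parenthesized style -> 'GATA1'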


def info_content(pwm, transpose=False, bg_gc=0.415):
  """ Compute PWM information content.

  In the original analysis, I used a bg_gc=0.5. For any future analysis,
  I ought to switch to the true hg19 value of 0.415.
  """
  pseudoc = 1e-9

  if transpose:
    pwm = np.transpose(pwm)

  bg_pwm = [1 - bg_gc, bg_gc, bg_gc, 1 - bg_gc]

  ic = 0
  for i in range(pwm.shape[0]):
    for j in range(4):
      # ic += 0.5 + pwm[i][j]*np.log2(pseudoc+pwm[i][j])
      ic += -bg_pwm[j] * np.log2(
          bg_pwm[j]) + pwm[i][j] * np.log2(pseudoc + pwm[i][j])

  return ic
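
# A minimal sketch (not part of the original script) illustrating
# info_content(): against a uniform background (bg_gc=0.5), a fully certain
# column contributes ~2 bits and a uniform column ~0.
def _info_content_demo():
  one_hot = np.array([[1.0, 0.0, 0.0, 0.0]])
  uniform = np.array([[0.25, 0.25, 0.25, 0.25]])
  # returns approximately (2.0, 0.0)
  return info_content(one_hot, bg_gc=0.5), info_content(uniform, bg_gc=0.5)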


def make_filter_pwm(filter_fasta):
  """ Make a PWM for this filter from its top hits """
  nts = {'A': 0, 'C': 1, 'G': 2, 'T': 3}
  pwm_counts = []
  nsites = 4  # pseudocounts
  for line in open(filter_fasta):
    if line[0] != '>':
      seq = line.rstrip()
      nsites += 1
      if len(pwm_counts) == 0:
        # initialize with the length
        for i in range(len(seq)):
          pwm_counts.append(np.array([1.0] * 4))

      # count
      for i in range(len(seq)):
        try:
          pwm_counts[i][nts[seq[i]]] += 1
        except KeyError:
          pwm_counts[i] += np.array([0.25] * 4)

  # normalize
  pwm_freqs = []
  for i in range(len(pwm_counts)):
    pwm_freqs.append([pwm_counts[i][j] / float(nsites) for j in range(4)])

  return np.array(pwm_freqs), nsites - 4
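
# The FASTA consumed above is the one plot_filter_logo() writes: one record
# per activation above threshold, with edge positions padded by 'N', e.g.
# (hypothetical record):
#
#   >12_305
#   NNACGTAC
#
# 'N' characters miss the nts dict, so the KeyError branch spreads their
# count evenly across all four nucleotides.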


def meme_add(meme_out, f, filter_pwm, nsites, trim_filters=False):
  """ Print a filter to the growing MEME file

  Attrs:
      meme_out : open file
      f (int) : filter index #
      filter_pwm (array) : filter PWM array
      nsites (int) : number of filter sites
  """
  if not trim_filters:
    ic_start = 0
    ic_end = filter_pwm.shape[0] - 1
  else:
    ic_t = 0.2

    # trim PWM of uninformative prefix
    ic_start = 0
    while ic_start < filter_pwm.shape[0] and info_content(
        filter_pwm[ic_start:ic_start + 1]) < ic_t:
      ic_start += 1

    # trim PWM of uninformative suffix
    ic_end = filter_pwm.shape[0] - 1
    while ic_end >= 0 and info_content(filter_pwm[ic_end:ic_end + 1]) < ic_t:
      ic_end -= 1

  if ic_start < ic_end:
    print('MOTIF filter%d' % f, file=meme_out)
    print('letter-probability matrix: alength= 4 w= %d nsites= %d' %
          (ic_end - ic_start + 1, nsites), file=meme_out)

    for i in range(ic_start, ic_end + 1):
      print('%.4f %.4f %.4f %.4f' % tuple(filter_pwm[i]), file=meme_out)
    print('', file=meme_out)
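
# For reference, each meme_add() record has this shape (numbers hypothetical),
# terminated by a blank line:
#
#   MOTIF filter12
#   letter-probability matrix: alength= 4 w= 8 nsites= 512
#   0.9120 0.0293 0.0293 0.0294
#   ...
#
# A tiny helper (not in the original script) to sanity-check the output:
def _count_meme_motifs(meme_file):
  """Count MOTIF records written by meme_intro()/meme_add()."""
  return sum(1 for line in open(meme_file) if line.startswith('MOTIF'))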


def meme_intro(meme_file, seqs):
  """ Open MEME motif format file and print intro

  Attrs:
      meme_file (str) : filename
      seqs [str] : list of strings for obtaining background freqs

  Returns:
      mem_out : open MEME file
  """
  nts = {'A': 0, 'C': 1, 'G': 2, 'T': 3}

  # count
  nt_counts = [1] * 4
  for i in range(len(seqs)):
    for nt in seqs[i]:
      try:
        nt_counts[nts[nt]] += 1
      except KeyError:
        pass

  # normalize
  nt_sum = float(sum(nt_counts))
  nt_freqs = [nt_counts[i] / nt_sum for i in range(4)]

  # open file for writing
  meme_out = open(meme_file, 'w')

  # print intro material
  print('MEME version 4', file=meme_out)
  print('', file=meme_out)
  print('ALPHABET= ACGT', file=meme_out)
  print('', file=meme_out)
  print('Background letter frequencies:', file=meme_out)
  print('A %.4f C %.4f G %.4f T %.4f' % tuple(nt_freqs), file=meme_out)
  print('', file=meme_out)

  return meme_out


def name_filters(num_filters, tomtom_file, meme_db_file):
  """ Name the filters using Tomtom matches.

  Attrs:
      num_filters (int) : total number of filters
      tomtom_file (str) : filename of Tomtom output table.
      meme_db_file (str) : filename of MEME db

  Returns:
      filter_names [str] : filter names, annotated with the top Tomtom match
  """
  # name by number
  filter_names = ['f%d' % fi for fi in range(num_filters)]

  # name by protein
  if tomtom_file is not None and meme_db_file is not None:
    motif_protein = get_motif_proteins(meme_db_file)

    # hash motifs and q-value's by filter
    filter_motifs = {}

    tt_in = open(tomtom_file)
    tt_in.readline()
    for line in tt_in:
      a = line.split()
      fi = int(a[0][6:])
      motif_id = a[1]
      qval = float(a[5])
      filter_motifs.setdefault(fi, []).append((qval, motif_id))
    tt_in.close()

    # assign filter's best match
    for fi in filter_motifs:
      top_motif = sorted(filter_motifs[fi])[0][1]
      filter_names[fi] += '_%s' % motif_protein[top_motif]

  return np.array(filter_names)
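
# A minimal sketch (not part of the original script) isolating the Tomtom
# row parsing used in name_filters(): query names are 'filter<N>', so the
# filter index starts at character 6, and the q-value sits in column 5.
def _parse_tomtom_row(line):
  """Parse one Tomtom data row into (filter_index, motif_id, qval)."""
  a = line.split()
  return int(a[0][6:]), a[1], float(a[5])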
"%6.4f %6.4f' % row_cols, file=table_out) table_out.close() ################################################################# # global filter", "high scoring outputs plot_filter_logo( filter_outs[:, :, f], filter_size, test_seqs, '%s/filter%d_logo'", "'%s/filter_segs_raw.pdf' % options.out_dir, whiten=False) # plot filter-target correlation heatmap plot_target_corr(filter_outs,", "import matplotlib matplotlib.use('PDF') import matplotlib.pyplot as plt import numpy as", "% row_cols, file=table_out) table_out.close() ################################################################# # global filter plots #################################################################", "!= 3: parser.error( 'Must provide Basenji parameters and model files", "heatmap of filter activations in # # Input # param_matrix:", "nsites), file=meme_out) for i in range(ic_start, ic_end + 1): print('%.4f", "for some reason, but taking # the max looks OK.", "help= 'Plot heat maps describing filter activations in the test", "length for i in range(len(seq)): pwm_counts.append(np.array([1.0] * 4)) # count", "open file for writing meme_out = open(meme_file, 'w') # print", "if line[0] != '>': seq = line.rstrip() nsites += 1", "filter_motifs = {} tt_in = open(tomtom_file) tt_in.readline() for line in", "= name_filters( num_filters, '%s/tomtom/tomtom.txt' % options.out_dir, options.meme_db) ################################################################# # print", "nt_sum = float(sum(nt_counts)) nt_freqs = [nt_counts[i] / nt_sum for i", "filter PWM array nsites (int) : number of filter sites", "of Tomtom output table. meme_db_file (str) : filename of MEME", "store useful variables num_filters = filter_weights.shape[0] filter_size = filter_weights.shape[2] #################################################################", "annotate filters ################################################################# # run tomtom subprocess.call( 'tomtom -dist pearson", "args[2] if not os.path.isdir(options.out_dir): os.mkdir(options.out_dir) ################################################################# # load data data_open", "a table of information ################################################################# table_out = open('%s/table.txt' % options.out_dir,", "variables num_filters = filter_weights.shape[0] filter_size = filter_weights.shape[2] ################################################################# # individual", ":], '%s/filter%d_heat.pdf' % (options.out_dir, f)) # write possum motif file", "0] # downsample sequences seqs_i = np.random.randint(0, filter_seqs.shape[1], 500) hmin", "std is sequence by filter. 


################################################################################
# plot_filter_seq_heat
#
# Plot a clustered heatmap of filter activations in the test sequences.
#
# Input
#  filter_outs: filter activations
#  out_pdf:
################################################################################
def plot_filter_seq_heat(filter_outs, out_pdf, whiten=True, drop_dead=True):
  # compute filter output means per sequence
  filter_seqs = filter_outs.mean(axis=2)

  # whiten
  if whiten:
    filter_seqs = preprocessing.scale(filter_seqs)

  # transpose
  filter_seqs = np.transpose(filter_seqs)

  if drop_dead:
    filter_stds = filter_seqs.std(axis=1)
    filter_seqs = filter_seqs[filter_stds > 0]

  # downsample sequences
  seqs_i = np.random.randint(0, filter_seqs.shape[1], 500)

  hmin = np.percentile(filter_seqs[:, seqs_i], 0.1)
  hmax = np.percentile(filter_seqs[:, seqs_i], 99.9)

  sns.set(font_scale=0.3)

  plt.figure()
  sns.clustermap(
      filter_seqs[:, seqs_i],
      row_cluster=True,
      col_cluster=True,
      linewidths=0,
      xticklabels=False,
      vmin=hmin,
      vmax=hmax)
  plt.savefig(out_pdf)
  #out_png = out_pdf[:-2] + 'ng'
  #plt.savefig(out_png, dpi=300)
  plt.close()
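
# A small sketch (not part of the original script) of what the whitening step
# above does: preprocessing.scale() standardizes each column to zero mean and
# unit variance, so filters with different activation scales cluster comparably.
def _whiten_demo():
  x = np.array([[1.0, 10.0], [3.0, 30.0]])
  return preprocessing.scale(x)  # every column becomes [-1., 1.]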


################################################################################
# plot_filter_seg_heat
#
# Plot a clustered heatmap of filter activations in sequence segments.
#
# Mean doesn't work well for the smaller segments for some reason, but taking
# the max looks OK. Still, similar motifs don't cluster quite as well as you
# might expect.
#
# Input
#  filter_outs
################################################################################
def plot_filter_seg_heat(filter_outs, out_pdf, whiten=True, drop_dead=True):
  b = filter_outs.shape[0]
  f = filter_outs.shape[1]
  l = filter_outs.shape[2]

  s = 5
  while l / float(s) - (l // s) > 0:
    s += 1
  print('%d segments of length %d' % (s, l // s))

  # split into multiple segments
  filter_outs_seg = np.reshape(filter_outs, (b, f, s, l // s))

  # max across the segments (mean didn't work well; see note above)
  filter_outs_mean = filter_outs_seg.max(axis=3)

  # break each segment into a new instance
  filter_seqs = np.reshape(np.swapaxes(filter_outs_mean, 2, 1), (s * b, f))

  # whiten
  if whiten:
    filter_seqs = preprocessing.scale(filter_seqs)

  # transpose
  filter_seqs = np.transpose(filter_seqs)

  if drop_dead:
    filter_stds = filter_seqs.std(axis=1)
    filter_seqs = filter_seqs[filter_stds > 0]

  # downsample sequences
  seqs_i = np.random.randint(0, filter_seqs.shape[1], 500)

  hmin = np.percentile(filter_seqs[:, seqs_i], 0.1)
  hmax = np.percentile(filter_seqs[:, seqs_i], 99.9)

  sns.set(font_scale=0.3)
  if whiten:
    dist = 'euclidean'
  else:
    dist = 'cosine'

  plt.figure()
  sns.clustermap(
      filter_seqs[:, seqs_i],
      metric=dist,
      row_cluster=True,
      col_cluster=True,
      linewidths=0,
      xticklabels=False,
      vmin=hmin,
      vmax=hmax)
  plt.savefig(out_pdf)
  #out_png = out_pdf[:-2] + 'ng'
  #plt.savefig(out_png, dpi=300)
  plt.close()


################################################################################
# filter_motif
#
# Collapse the filter parameter matrix to a single DNA motif.
#
# Input
#  param_matrix: np.array of the filter's parameter matrix
################################################################################
def filter_motif(param_matrix):
  nts = 'ACGT'

  motif_list = []
  for v in range(param_matrix.shape[1]):
    max_n = 0
    for n in range(1, 4):
      if param_matrix[n, v] > param_matrix[max_n, v]:
        max_n = n

    if param_matrix[max_n, v] > 0:
      motif_list.append(nts[max_n])
    else:
      motif_list.append('N')

  return ''.join(motif_list)


################################################################################
# filter_possum
#
# Write a Possum-style motif
#
# Input
#  param_matrix: np.array of the filter's parameter matrix
#  motif_id: motif name
#  possum_file: output filename
################################################################################
def filter_possum(param_matrix, motif_id, possum_file, trim_filters=False,
                  mult=200):
  # possible trim
  trim_start = 0
  trim_end = param_matrix.shape[1] - 1
  trim_t = 0.3

  if trim_filters:
    # trim PWM of uninformative prefix
    while trim_start < param_matrix.shape[1] and np.max(
        param_matrix[:, trim_start]) - np.min(
            param_matrix[:, trim_start]) < trim_t:
      trim_start += 1

    # trim PWM of uninformative suffix
    while trim_end >= 0 and np.max(param_matrix[:, trim_end]) - np.min(
        param_matrix[:, trim_end]) < trim_t:
      trim_end -= 1

  if trim_start < trim_end:
    possum_out = open(possum_file, 'w')
    print('BEGIN GROUP', file=possum_out)
    print('BEGIN FLOAT', file=possum_out)
    print('ID %s' % motif_id, file=possum_out)
    print('AP DNA', file=possum_out)
    print('LE %d' % (trim_end + 1 - trim_start), file=possum_out)
    for ci in range(trim_start, trim_end + 1):
      print('MA %s' % ' '.join(
          ['%.2f' % (mult * n) for n in param_matrix[:, ci]]), file=possum_out)
    print('END', file=possum_out)
    print('END', file=possum_out)

    possum_out.close()
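
# A minimal sketch (not part of the original script) of filter_motif() on a
# hypothetical 4 x 3 weight matrix: the argmax nucleotide wins each column,
# and columns with no positive weight become 'N'.
def _filter_motif_demo():
  w = np.array([[0.9, -0.1, -0.5],
                [-0.2, 0.8, -0.4],
                [0.1, -0.3, -0.2],
                [-0.5, 0.2, -0.1]])
  return filter_motif(w)  # -> 'ACN'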


################################################################################
# plot_filter_heat
#
# Plot a heatmap of the filter's parameters.
#
# Input
#  param_matrix: np.array of the filter's parameter matrix
#  out_pdf:
################################################################################
def plot_filter_heat(param_matrix, out_pdf):
  param_range = abs(param_matrix).max()

  sns.set(font_scale=2)
  plt.figure(figsize=(param_matrix.shape[1], 4))
  sns.heatmap(
      param_matrix,
      cmap='PRGn',
      linewidths=0.2,
      vmin=-param_range,
      vmax=param_range)
  ax = plt.gca()
  ax.set_xticklabels(range(1, param_matrix.shape[1] + 1))
  ax.set_yticklabels('TGCA', rotation='horizontal')  # , size=10)
  plt.savefig(out_pdf)
  plt.close()


################################################################################
# plot_filter_logo
#
# Plot a weblogo of the filter's occurrences in the given sequences.
#
# Input
#  filter_outs: activations for this filter across sequences and positions
#  filter_size: filter width
#  seqs: sequences, as strings
#  out_prefix: output path prefix
################################################################################
def plot_filter_logo(filter_outs, filter_size, seqs, out_prefix, raw_t=0,
                     maxpct_t=None):
  if maxpct_t:
    all_outs = np.ravel(filter_outs)
    all_outs_mean = all_outs.mean()
    all_outs_norm = all_outs - all_outs_mean
    raw_t = maxpct_t * all_outs_norm.max() + all_outs_mean

  left_pad = (filter_size - 1) // 2
  right_pad = filter_size - left_pad

  # print fasta file of positive outputs
  filter_fasta_out = open('%s.fa' % out_prefix, 'w')
  filter_count = 0

  for i in range(filter_outs.shape[0]):
    for j in range(filter_outs.shape[1]):
      if filter_outs[i, j] > raw_t:
        # construct kmer
        kmer = ''

        # determine boundaries, considering padding
        fstart = j - left_pad
        fend = fstart + filter_size

        # if it starts in left_pad
        if fstart < 0:
          kmer += 'N' * (-fstart)
          fstart = 0

        # add primary sequence
        kmer += seqs[i][fstart:fend]

        # if it ends in right_pad
        if fend > len(seqs[i]):
          kmer += 'N' * (fend - len(seqs[i]))

        # output
        print('>%d_%d' % (i, j), file=filter_fasta_out)
        print(kmer, file=filter_fasta_out)
        filter_count += 1

  filter_fasta_out.close()

  # make weblogo
  if filter_count > 0:
    weblogo_cmd = 'weblogo %s < %s.fa > %s.eps' % (weblogo_opts, out_prefix,
                                                   out_prefix)
    subprocess.call(weblogo_cmd, shell=True)
filter sites \"\"\" if not trim_filters:", "type='float', help= 'Activation threshold (as proportion of max) to consider", "out_pdf: ################################################################################ def plot_target_corr(filter_outs, seq_targets, filter_names, target_names, out_pdf, seq_op='mean'): num_seqs", "= data_open['test_out_imag'] if options.valid: test_targets_imag = data_open['valid_out_imag'] ################################################################# # predict", "param_matrix.shape[1] + 1)) ax.set_yticklabels('TGCA', rotation='horizontal') # , size=10) plt.savefig(out_pdf) plt.close()", "1, 0]) print(filter_weights.shape) # test t0 = time.time() layer_filter_outs, _", ": open MEME file \"\"\" nts = {'A': 0, 'C':", "motif_protein def info_content(pwm, transpose=False, bg_gc=0.415): \"\"\" Compute PWM information content.", "density of filter output scores fmean, fstd = plot_score_density( np.ravel(filter_outs[:,", "and # targets. # # Input # filter_outs: # filter_names:", "'Plot heat maps describing filter activations in the test sequences", "0, 'C': 1, 'G': 2, 'T': 3} # count nt_counts", "'train_out_imag' in data_open if job['fourier']: test_targets_imag = data_open['test_out_imag'] if options.valid:", "% options.out_dir) # plot filter-segment heatmap plot_filter_seg_heat(filter_outs, '%s/filter_segs.pdf' % options.out_dir)", "for i in range(filter_outs.shape[0]): for j in range(filter_outs.shape[1]): if filter_outs[i,", "i in range(len(pwm_counts)): pwm_freqs.append([pwm_counts[i][j] / float(nsites) for j in range(4)])", "test set [Default:%default]') parser.add_option( '-t', dest='trim_filters', default=False, action='store_true', help='Trim uninformative", "well as you # might expect. # # Input #", "heatmap plot_target_corr(filter_outs, seq_targets, filter_names, target_names, '%s/filter_target_cors_mean.pdf' % options.out_dir, 'mean') plot_target_corr(filter_outs,", "bg_pwm = [1 - bg_gc, bg_gc, bg_gc, 1 - bg_gc]", "i in range(len(seq)): pwm_counts.append(np.array([1.0] * 4)) # count for i", "< trim_t: trim_start += 1 # trim PWM of uninformative", "-= 1 if trim_start < trim_end: possum_out = open(possum_file, 'w')", "np.max(param_matrix[:, trim_end]) - np.min( param_matrix[:, trim_end]) < trim_t: trim_end -=", "prefix while trim_start < param_matrix.shape[1] and np.max( param_matrix[:, trim_start]) -", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "nts = 'ACGT' motif_list = [] for v in range(param_matrix.shape[1]):", "job['seq_length'] = test_seqs1.shape[1] job['seq_depth'] = test_seqs1.shape[2] job['num_targets'] = test_targets.shape[2] job['target_pool']", "%s.eps' % (weblogo_opts, out_prefix, out_prefix) subprocess.call(weblogo_cmd, shell=True) ################################################################################ # plot_score_density", "test_targets, test_targets_imag, batch_size=dr.batch_size, pool_width=job['target_pool']) else: batcher_test = basenji.batcher.Batcher( test_seqs1, test_targets,", "{} for line in open(meme_db_file): a = line.split() if len(a)", "# print a table of information ################################################################# table_out = open('%s/table.txt'", "= filter_names[filter_seqs_std > 0] filter_target_cors = np.zeros((len(filter_names_live), num_targets)) for fi", "load variables into session saver.restore(sess, model_file) # get weights filter_weights", "nsites = 4 # pseudocounts for line in open(filter_fasta): if", "plot_target_corr(filter_outs, seq_targets, filter_names, target_names, 

def get_motif_proteins(meme_db_file):
  """ Hash motif_id's to protein names using the MEME DB file """
  motif_protein = {}
  for line in open(meme_db_file):
    a = line.split()
    if len(a) > 0 and a[0] == 'MOTIF':
      if a[2][0] == '(':
        motif_protein[a[1]] = a[2][1:a[2].find(')')]
      else:
        motif_protein[a[1]] = a[2]
  return motif_protein
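
# For example (the motif IDs here are hypothetical), a database line
# 'MOTIF MA0139.1 CTCF' maps 'MA0139.1' -> 'CTCF', and the parenthesized
# form 'MOTIF M1 (CTCF)' also maps 'M1' -> 'CTCF' via the a[2][0] == '('
# branch above.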

def info_content(pwm, transpose=False, bg_gc=0.415):
  """ Compute PWM information content.

  In the original analysis, I used a bg_gc=0.5. For any future analysis,
  I ought to switch to the true hg19 value of 0.415.
  """
  pseudoc = 1e-9

  if transpose:
    pwm = np.transpose(pwm)

  bg_pwm = [1 - bg_gc, bg_gc, bg_gc, 1 - bg_gc]

  ic = 0
  for i in range(pwm.shape[0]):
    for j in range(4):
      # ic += 0.5 + pwm[i][j]*np.log2(pseudoc+pwm[i][j])
      ic += -bg_pwm[j] * np.log2(
          bg_pwm[j]) + pwm[i][j] * np.log2(pseudoc + pwm[i][j])

  return ic
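
# A minimal worked example (illustrative; this helper is not part of the
# original pipeline): against the hg19 background above, a deterministic
# column contributes ~1.96 bits while a uniform column contributes ~ -0.04,
# which is why the ic_t = 0.2 trimming threshold in meme_add below discards
# near-uniform filter ends.
def _info_content_demo():
  certain = np.array([[1.0, 0.0, 0.0, 0.0]])
  uniform = np.array([[0.25, 0.25, 0.25, 0.25]])
  print('certain column: %.3f bits' % info_content(certain))  # ~1.958
  print('uniform column: %.3f bits' % info_content(uniform))  # ~-0.042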

def make_filter_pwm(filter_fasta):
  """ Make a PWM for this filter from its top hits """
  nts = {'A': 0, 'C': 1, 'G': 2, 'T': 3}
  pwm_counts = []
  nsites = 4  # pseudocounts
  for line in open(filter_fasta):
    if line[0] != '>':
      seq = line.rstrip()
      nsites += 1
      if len(pwm_counts) == 0:
        # initialize with the length
        for i in range(len(seq)):
          pwm_counts.append(np.array([1.0] * 4))

      # count
      for i in range(len(seq)):
        try:
          pwm_counts[i][nts[seq[i]]] += 1
        except KeyError:
          pwm_counts[i] += np.array([0.25] * 4)

  # normalize
  pwm_freqs = []
  for i in range(len(pwm_counts)):
    pwm_freqs.append([pwm_counts[i][j] / float(nsites) for j in range(4)])

  return np.array(pwm_freqs), nsites - 4


def meme_add(meme_out, f, filter_pwm, nsites, trim_filters=False):
  """ Print a filter to the growing MEME file

  Attrs:
    meme_out : open file
    f (int) : filter index #
    filter_pwm (array) : filter PWM array
    nsites (int) : number of filter sites
  """
  if not trim_filters:
    ic_start = 0
    ic_end = filter_pwm.shape[0] - 1
  else:
    ic_t = 0.2

    # trim PWM of uninformative prefix
    ic_start = 0
    while ic_start < filter_pwm.shape[0] and info_content(
        filter_pwm[ic_start:ic_start + 1]) < ic_t:
      ic_start += 1

    # trim PWM of uninformative suffix
    ic_end = filter_pwm.shape[0] - 1
    while ic_end >= 0 and info_content(filter_pwm[ic_end:ic_end + 1]) < ic_t:
      ic_end -= 1

  if ic_start < ic_end:
    print('MOTIF filter%d' % f, file=meme_out)
    print(
        'letter-probability matrix: alength= 4 w= %d nsites= %d' %
        (ic_end - ic_start + 1, nsites), file=meme_out)

    for i in range(ic_start, ic_end + 1):
      print('%.4f %.4f %.4f %.4f' % tuple(filter_pwm[i]), file=meme_out)
    print('', file=meme_out)


def meme_intro(meme_file, seqs):
  """ Open MEME motif format file and print intro

  Attrs:
    meme_file (str) : filename
    seqs [str] : list of strings for obtaining background freqs

  Returns:
    mem_out : open MEME file
  """
  nts = {'A': 0, 'C': 1, 'G': 2, 'T': 3}

  # count
  nt_counts = [1] * 4
  for i in range(len(seqs)):
    for nt in seqs[i]:
      try:
        nt_counts[nts[nt]] += 1
      except KeyError:
        pass

  # normalize
  nt_sum = float(sum(nt_counts))
  nt_freqs = [nt_counts[i] / nt_sum for i in range(4)]

  # open file for writing
  meme_out = open(meme_file, 'w')

  # print intro material
  print('MEME version 4', file=meme_out)
  print('', file=meme_out)
  print('ALPHABET= ACGT', file=meme_out)
  print('', file=meme_out)
  print('Background letter frequencies:', file=meme_out)
  print('A %.4f C %.4f G %.4f T %.4f' % tuple(nt_freqs), file=meme_out)
  print('', file=meme_out)

  return meme_out
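
# Together, meme_intro and meme_add emit a minimal MEME text file shaped
# like this sketch (the frequencies, width, and site count are illustrative):
#
#   MEME version 4
#
#   ALPHABET= ACGT
#
#   Background letter frequencies:
#   A 0.2900 C 0.2100 G 0.2100 T 0.2900
#
#   MOTIF filter0
#   letter-probability matrix: alength= 4 w= 2 nsites= 120
#   0.9000 0.0300 0.0300 0.0400
#   0.0500 0.0500 0.8500 0.0500
#
# This is the format Tomtom consumes in the annotation step of main().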

def name_filters(num_filters, tomtom_file, meme_db_file):
  """ Name the filters using Tomtom matches.

  Attrs:
    num_filters (int) : total number of filters
    tomtom_file (str) : filename of Tomtom output table.
    meme_db_file (str) : filename of MEME db

  Returns:
    filter_names [str] :
  """
  # name by number
  filter_names = ['f%d' % fi for fi in range(num_filters)]

  # name by protein
  if tomtom_file is not None and meme_db_file is not None:
    motif_protein = get_motif_proteins(meme_db_file)

    # hash motifs and q-value's by filter
    filter_motifs = {}

    tt_in = open(tomtom_file)
    tt_in.readline()
    for line in tt_in:
      a = line.split()
      fi = int(a[0][6:])
      motif_id = a[1]
      qval = float(a[5])
      filter_motifs.setdefault(fi, []).append((qval, motif_id))
    tt_in.close()

    # assign filter's best match
    for fi in filter_motifs:
      top_motif = sorted(filter_motifs[fi])[0][1]
      filter_names[fi] += '_%s' % motif_protein[top_motif]

  return np.array(filter_names)

################################################################################
# plot_target_corr
#
# Plot a clustered heatmap of correlations between filter activations and
# targets.
#
# Input
#  filter_outs:
#  filter_names:
#  target_names:
#  out_pdf:
################################################################################
def plot_target_corr(filter_outs, seq_targets, filter_names, target_names,
                     out_pdf, seq_op='mean'):
  num_seqs = filter_outs.shape[0]
  num_targets = len(target_names)

  if seq_op == 'mean':
    filter_outs_seq = filter_outs.mean(axis=2)
  else:
    filter_outs_seq = filter_outs.max(axis=2)

  # std is sequence by filter.
  filter_seqs_std = filter_outs_seq.std(axis=0)
  filter_outs_seq = filter_outs_seq[:, filter_seqs_std > 0]
  filter_names_live = filter_names[filter_seqs_std > 0]

  filter_target_cors = np.zeros((len(filter_names_live), num_targets))
  for fi in range(len(filter_names_live)):
    for ti in range(num_targets):
      cor, p = spearmanr(filter_outs_seq[:, fi], seq_targets[:num_seqs, ti])
      filter_target_cors[fi, ti] = cor

  cor_df = pd.DataFrame(
      filter_target_cors, index=filter_names_live, columns=target_names)

  sns.set(font_scale=0.3)
  plt.figure()
  sns.clustermap(cor_df, cmap='BrBG', center=0, figsize=(8, 10))
  plt.savefig(out_pdf)
  plt.close()
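
# A shape-checking sketch (illustrative; the toy arrays and output path are
# hypothetical). Here filter_outs is (sequences x filters x positions),
# seq_targets is (sequences x targets), and filter_names must be a numpy
# array so the dead-filter boolean mask above can index it.
def _plot_target_corr_demo():
  toy_outs = np.random.rand(10, 3, 8)  # 10 seqs, 3 filters, 8 positions
  toy_targets = np.random.rand(10, 2)  # 2 targets
  plot_target_corr(toy_outs, toy_targets, np.array(['f0', 'f1', 'f2']),
                   ['t0', 't1'], 'target_cors_demo.pdf')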
\"\"\" pseudoc =", "center=0, figsize=(8, 10)) plt.savefig(out_pdf) plt.close() ################################################################################ # plot_filter_seq_heat # #", "for the smaller segments for some reason, but taking #", "= filter_motif(filter_weights[f, :, :]) # grab annotation annotation = '.'", "columns=target_names) sns.set(font_scale=0.3) plt.figure() sns.clustermap(cor_df, cmap='BrBG', center=0, figsize=(8, 10)) plt.savefig(out_pdf) plt.close()", "Returns: filter_names [str] : \"\"\" # name by number filter_names", "len(name_pieces) > 1: annotation = name_pieces[1] # plot density of", ": list of strings for obtaining background freqs Returns: mem_out", "from its top hits \"\"\" nts = {'A': 0, 'C':", "hash motifs and q-value's by filter filter_motifs = {} tt_in", "range(len(pwm_counts)): pwm_freqs.append([pwm_counts[i][j] / float(nsites) for j in range(4)]) return np.array(pwm_freqs),", "file meme_add(meme_out, f, filter_pwm, nsites, options.trim_filters) meme_out.close() ################################################################# # annotate", "plt.savefig(out_pdf) plt.close() ################################################################################ # plot_filter_logo # # Plot a weblogo", "> param_matrix[max_n, v]: max_n = n if param_matrix[max_n, v] >", "1 filter_fasta_out.close() # make weblogo if filter_count > 0: weblogo_cmd", "OptionParser import copy, os, pdb, random, shutil, subprocess, time import", "filter to the growing MEME file Attrs: meme_out : open", "if len(a) > 0 and a[0] == 'MOTIF': if a[2][0]", "pwm_counts[i][nts[seq[i]]] += 1 except KeyError: pwm_counts[i] += np.array([0.25] * 4)", "# the max looks OK. Still, similar motifs don't cluster", "of MEME db Returns: filter_names [str] : \"\"\" # name", "in range(4)] # open file for writing meme_out = open(meme_file,", "else: ic_t = 0.2 # trim PWM of uninformative prefix", "outputs filter_fasta_out = open('%s.fa' % out_prefix, 'w') filter_count = 0", "pdb, random, shutil, subprocess, time import h5py import matplotlib matplotlib.use('PDF')", "= filter_outs.mean(axis=2) # whiten if whiten: filter_seqs = preprocessing.scale(filter_seqs) #", "the growing MEME file Attrs: meme_out : open file f", "of filter activations in sequence segments. 
# # Mean doesn't", "'Activation threshold (as proportion of max) to consider for PWM", "row_cluster=True, col_cluster=True, linewidths=0, xticklabels=False, vmin=hmin, vmax=hmax) plt.savefig(out_pdf) #out_png = out_pdf[:-2]", "% motif_id, file=possum_out) print('AP DNA', file=possum_out) print('LE %d' % (trim_end", "4 for i in range(len(seqs)): for nt in seqs[i]: try:", "nsites (int) : number of filter sites \"\"\" if not", "plot_filter_logo( filter_outs[:, :, f], filter_size, test_seqs, '%s/filter%d_logo' % (options.out_dir, f),", "# Copyright 2017 Calico LLC # Licensed under the Apache", "growing MEME file Attrs: meme_out : open file f (int)", "whiten=False) # plot filter-target correlation heatmap plot_target_corr(filter_outs, seq_targets, filter_names, target_names,", "tt_in: a = line.split() fi = int(a[0][6:]) motif_id = a[1]", "filter_outs_seg.max(axis=3) # break each segment into a new instance filter_seqs", "################################################################################ def plot_filter_heat(param_matrix, out_pdf): param_range = abs(param_matrix).max() sns.set(font_scale=2) plt.figure(figsize=(param_matrix.shape[1], 4))", "filter_weights = sess.run(dr.filter_weights[0]) filter_weights = np.transpose(np.squeeze(filter_weights), [2, 1, 0]) print(filter_weights.shape)", "<params_file> <model_file> <data_file>' parser = OptionParser(usage) parser.add_option( '-a', dest='act_t', default=0.5,", "= filter_outs_seq.std(axis=0) filter_outs_seq = filter_outs_seq[:, filter_seqs_std > 0] filter_names_live =", "# compute and save information content filters_ic.append(info_content(filter_pwm)) # add to", "plot_filter_seq_heat # # Plot a clustered heatmap of filter activations", "line in open(meme_db_file): a = line.split() if len(a) > 0", "filter_count += 1 filter_fasta_out.close() # make weblogo if filter_count >", ":, f]), '%s/filter%d_dens.pdf' % (options.out_dir, f)) row_cols = (f, consensus,", "time import h5py import matplotlib matplotlib.use('PDF') import matplotlib.pyplot as plt", "args) = parser.parse_args() if len(args) != 3: parser.error( 'Must provide", "filter_names[fi] += '_%s' % motif_protein[top_motif] return np.array(filter_names) ################################################################################ # plot_target_corr", "output means per sequence filter_seqs = filter_outs.mean(axis=2) # whiten if", "future analysis, I ought to switch to the true hg19", "# plot_filter_seq_heat # # Plot a clustered heatmap of filter", "filter_motif # # Collapse the filter parameter matrix to a", "[2, 1, 0]) print(filter_weights.shape) # test t0 = time.time() layer_filter_outs,", "possum_out.close() ################################################################################ # plot_filter_heat # # Plot a heatmap of", "3} # count nt_counts = [1] * 4 for i", "sess: # load variables into session saver.restore(sess, model_file) # get", "################################################################################ # filter_motif # # Collapse the filter parameter matrix", "filter output scores fmean, fstd = plot_score_density( np.ravel(filter_outs[:, :, f]),", "not None: motif_protein = get_motif_proteins(meme_db_file) # hash motifs and q-value's", "= {'A': 0, 'C': 1, 'G': 2, 'T': 3} #", "a = line.split() if len(a) > 0 and a[0] ==", "+= 1 except KeyError: pass # normalize nt_sum = float(sum(nt_counts))", "= ['f%d' % fi for fi in range(num_filters)] # name", "plot filter-segment heatmap plot_filter_seg_heat(filter_outs, '%s/filter_segs.pdf' % options.out_dir) 
plot_filter_seg_heat( filter_outs, '%s/filter_segs_raw.pdf'", "import h5py import matplotlib matplotlib.use('PDF') import matplotlib.pyplot as plt import", "if trim_start < trim_end: possum_out = open(possum_file, 'w') print('BEGIN GROUP',", "i in range(4)] # open file for writing meme_out =", "in range(filter_outs.shape[0]): for j in range(filter_outs.shape[1]): if filter_outs[i, j] >", "= 'ACGT' motif_list = [] for v in range(param_matrix.shape[1]): max_n", "Mean doesn't work well for the smaller segments for some", "pool_width=job['target_pool']) # initialize saver saver = tf.train.Saver() with tf.Session() as", "# break each segment into a new instance filter_seqs =", "to consider for PWM [Default: %default]' ) parser.add_option( '-d', dest='model_hdf5_file',", "Version 2.0 (the \"License\"); # you may not use this", "out_pdf): param_range = abs(param_matrix).max() sns.set(font_scale=2) plt.figure(figsize=(param_matrix.shape[1], 4)) sns.heatmap( param_matrix, cmap='PRGn',", "name by number filter_names = ['f%d' % fi for fi", "= out_pdf[:-2] + 'ng' #plt.savefig(out_png, dpi=300) plt.close() ################################################################################ # filter_motif", "test_targets_imag = data_open['valid_out_imag'] ################################################################# # predict # initialize batcher if", "i in range(ic_start, ic_end + 1): print('%.4f %.4f %.4f %.4f'", "# print header for later panda reading header_cols = ('',", "first convolution layer of the given model using the given", "file=possum_out) print('END', file=possum_out) print('END', file=possum_out) possum_out.close() ################################################################################ # plot_filter_heat #", "total number of filters tomtom_file (str) : filename of Tomtom", "# param_matrix: np.array of the filter's parameter matrix # out_pdf:", "name_filters( num_filters, '%s/tomtom/tomtom.txt' % options.out_dir, options.meme_db) ################################################################# # print a", "################################################################################ # plot_score_density # # Plot the score density and", "# target_names: # out_pdf: ################################################################################ def plot_target_corr(filter_outs, seq_targets, filter_names, target_names,", "main(): usage = 'usage: %prog [options] <params_file> <model_file> <data_file>' parser", "# might expect. # # Input # filter_outs ################################################################################ def", "% f, file=meme_out) print( 'letter-probability matrix: alength= 4 w= %d", "model files and test data in HDF5' ' format.' )", ": number of filter sites \"\"\" if not trim_filters: ic_start", "data_open['valid_out_imag'] ################################################################# # predict # initialize batcher if job['fourier']: batcher_test", "print('BEGIN FLOAT', file=possum_out) print('ID %s' % motif_id, file=possum_out) print('AP DNA',", "(str) : filename of MEME db Returns: filter_names [str] :", "seq_op='mean'): num_seqs = filter_outs.shape[0] num_targets = len(target_names) if seq_op ==", "used to annotate motifs') parser.add_option( '-p', dest='plot_heats', default=False, action='store_true', help=", "'-s', dest='sample', default=None, type='int', help='Sample sequences from the test set", "and test data in HDF5' ' format.' ) else: params_file", "> raw_t: # construct kmer kmer = '' # determine", "in sequence segments. 
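
# A worked example of the segment search above (Python 2 integer division
# is assumed, since l / s is later used as a reshape dimension): starting
# from s = 5, the loop advances to the smallest s >= 5 that divides l
# evenly, so l = 1024 gives s = 8 segments of length 128.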

################################################################################
# plot_filter_heat
#
# Plot a heatmap of the filter's parameters.
#
# Input
#  param_matrix: np.array of the filter's parameter matrix
#  out_pdf:
################################################################################
def plot_filter_heat(param_matrix, out_pdf):
  param_range = abs(param_matrix).max()

  sns.set(font_scale=2)
  plt.figure(figsize=(param_matrix.shape[1], 4))
  sns.heatmap(
      param_matrix,
      cmap='PRGn',
      linewidths=0.2,
      vmin=-param_range,
      vmax=param_range)
  ax = plt.gca()
  ax.set_xticklabels(range(1, param_matrix.shape[1] + 1))
  ax.set_yticklabels('TGCA', rotation='horizontal')  # , size=10)
  plt.savefig(out_pdf)
  plt.close()


################################################################################
# plot_filter_logo
#
# Plot a weblogo of the filter's occurrences
#
# Input
#  param_matrix: np.array of the filter's parameter matrix
#  out_pdf:
################################################################################
def plot_filter_logo(filter_outs, filter_size, seqs, out_prefix, raw_t=0,
                     maxpct_t=None):
  if maxpct_t:
    all_outs = np.ravel(filter_outs)
    all_outs_mean = all_outs.mean()
    all_outs_norm = all_outs - all_outs_mean
    raw_t = maxpct_t * all_outs_norm.max() + all_outs_mean

  left_pad = (filter_size - 1) // 2
  right_pad = filter_size - left_pad

  # print fasta file of positive outputs
  filter_fasta_out = open('%s.fa' % out_prefix, 'w')
  filter_count = 0

  for i in range(filter_outs.shape[0]):
    for j in range(filter_outs.shape[1]):
      if filter_outs[i, j] > raw_t:
        # construct kmer
        kmer = ''

        # determine boundaries, considering padding
        fstart = j - left_pad
        fend = fstart + filter_size

        # if it starts in left_pad
        if fstart < 0:
          kmer += 'N' * (-fstart)
          fstart = 0

        # add primary sequence
        kmer += seqs[i][fstart:fend]

        # if it ends in right_pad
        if fend > len(seqs[i]):
          kmer += 'N' * (fend - len(seqs[i]))

        # output
        print('>%d_%d' % (i, j), file=filter_fasta_out)
        print(kmer, file=filter_fasta_out)
        filter_count += 1

  filter_fasta_out.close()

  # make weblogo
  if filter_count > 0:
    weblogo_cmd = 'weblogo %s < %s.fa > %s.eps' % (weblogo_opts, out_prefix,
                                                   out_prefix)
    subprocess.call(weblogo_cmd, shell=True)


################################################################################
# filter_motif
#
# Collapse the filter parameter matrix to a single DNA motif.
#
# Input
#  param_matrix: np.array of the filter's parameter matrix
#  out_pdf:
################################################################################
def filter_motif(param_matrix):
  nts = 'ACGT'

  motif_list = []
  for v in range(param_matrix.shape[1]):
    max_n = 0
    for n in range(1, 4):
      if param_matrix[n, v] > param_matrix[max_n, v]:
        max_n = n

    if param_matrix[max_n, v] > 0:
      motif_list.append(nts[max_n])
    else:
      motif_list.append('N')

  return ''.join(motif_list)


################################################################################
# filter_possum
#
# Write a Possum-style motif
#
# Input
#  param_matrix: np.array of the filter's parameter matrix
#  out_pdf:
################################################################################
def filter_possum(param_matrix, motif_id, possum_file, trim_filters=False,
                  mult=200):
  # possible trim
  trim_start = 0
  trim_end = param_matrix.shape[1] - 1
  trim_t = 0.3
  if trim_filters:
    # trim PWM of uninformative prefix
    while trim_start < param_matrix.shape[1] and np.max(
        param_matrix[:, trim_start]) - np.min(
            param_matrix[:, trim_start]) < trim_t:
      trim_start += 1

    # trim PWM of uninformative suffix
    while trim_end >= 0 and np.max(param_matrix[:, trim_end]) - np.min(
        param_matrix[:, trim_end]) < trim_t:
      trim_end -= 1

  if trim_start < trim_end:
    possum_out = open(possum_file, 'w')
    print('BEGIN GROUP', file=possum_out)
    print('BEGIN FLOAT', file=possum_out)
    print('ID %s' % motif_id, file=possum_out)
    print('AP DNA', file=possum_out)
    print('LE %d' % (trim_end + 1 - trim_start), file=possum_out)
    for ci in range(trim_start, trim_end + 1):
      print(
          'MA %s' % ' '.join(['%.2f' % (mult * n)
                              for n in param_matrix[:, ci]]),
          file=possum_out)
    print('END', file=possum_out)
    print('END', file=possum_out)
    possum_out.close()


################################################################################
# plot_score_density
#
# Plot the score density and print to the stats table.
#
# Input
#  param_matrix: np.array of the filter's parameter matrix
#  out_pdf:
################################################################################
def plot_score_density(f_scores, out_pdf):
  sns.set(font_scale=1.3)
  plt.figure()
  sns.distplot(f_scores, kde=False)
  plt.xlabel('ReLU output')
  plt.savefig(out_pdf)
  plt.close()

  return f_scores.mean(), f_scores.std()


################################################################################
# __main__
################################################################################
if __name__ == '__main__':
  main()
Attrs: num_filters", "-bg_pwm[j] * np.log2( bg_pwm[j]) + pwm[i][j] * np.log2(pseudoc + pwm[i][j])", "# You may obtain a copy of the License at", "j), file=filter_fasta_out) print(kmer, file=filter_fasta_out) filter_count += 1 filter_fasta_out.close() # make", "# trim PWM of uninformative suffix while trim_end >= 0", "if a[2][0] == '(': motif_protein[a[1]] = a[2][1:a[2].find(')')] else: motif_protein[a[1]] =", "= filter_weights.shape[0] filter_size = filter_weights.shape[2] ################################################################# # individual filter plots", "t0 = time.time() layer_filter_outs, _ = dr.hidden(sess, batcher_test, layers=[0]) filter_outs", "file=meme_out) print('', file=meme_out) print('ALPHABET= ACGT', file=meme_out) print('', file=meme_out) print('Background letter", "out_pdf, whiten=True, drop_dead=True): # compute filter output means per sequence", "param_matrix.shape[1] and np.max( param_matrix[:, trim_start]) - np.min( param_matrix[:, trim_start]) <", "out_pdf, seq_op='mean'): num_seqs = filter_outs.shape[0] num_targets = len(target_names) if seq_op", "file=meme_out) print('A %.4f C %.4f G %.4f T %.4f' %", "mem_out : open MEME file \"\"\" nts = {'A': 0,", "means per sequence filter_seqs = filter_outs.mean(axis=2) # whiten if whiten:", "% f, '%s/filter%d_possum.txt' % (options.out_dir, f), options.trim_filters) # plot weblogo", "> len(seqs[i]): kmer += 'N' * (fend - len(seqs[i])) #", "max_n = 0 for n in range(1, 4): if param_matrix[n,", "= open(meme_file, 'w') # print intro material print('MEME version 4',", "(as proportion of max) to consider for PWM [Default: %default]'", "of filter sites \"\"\" if not trim_filters: ic_start = 0", "<model_file> <data_file>' parser = OptionParser(usage) parser.add_option( '-a', dest='act_t', default=0.5, type='float',", "if len(args) != 3: parser.error( 'Must provide Basenji parameters and", "add primary sequence kmer += seqs[i][fstart:fend] # if it ends", "- left_pad fend = fstart + filter_size # if it", "filter parameter matrix to a single DNA motif. 
# #", "################################################################################ def main(): usage = 'usage: %prog [options] <params_file> <model_file>", "target_names = list(data_open['target_labels']) except KeyError: target_names = ['t%d' % ti", "vmax=hmax) plt.savefig(out_pdf) #out_png = out_pdf[:-2] + 'ng' #plt.savefig(out_png, dpi=300) plt.close()", "filter_outs_seg = np.reshape(filter_outs, (b, f, s, l / s)) #", "and save information content filters_ic.append(info_content(filter_pwm)) # add to the meme", "= OptionParser(usage) parser.add_option( '-a', dest='act_t', default=0.5, type='float', help= 'Activation threshold", "trim_end >= 0 and np.max(param_matrix[:, trim_end]) - np.min( param_matrix[:, trim_end])", "as a heatmap plot_filter_heat(filter_weights[f, :, :], '%s/filter%d_heat.pdf' % (options.out_dir, f))", "outputs plot_filter_logo( filter_outs[:, :, f], filter_size, test_seqs, '%s/filter%d_logo' % (options.out_dir,", "# Collapse the filter parameter matrix to a single DNA", "# # Plot a weblogo of the filter's occurrences #", "matplotlib matplotlib.use('PDF') import matplotlib.pyplot as plt import numpy as np", "action='store_true', help='Trim uninformative positions off the filter ends [Default: %default]'", "else: params_file = args[0] model_file = args[1] data_file = args[2]", "preprocessing.scale(filter_seqs) # transpose filter_seqs = np.transpose(filter_seqs) if drop_dead: filter_stds =", "fmean, fstd = plot_score_density( np.ravel(filter_outs[:, :, f]), '%s/filter%d_dens.pdf' % (options.out_dir,", "################################################################################ def plot_filter_logo(filter_outs, filter_size, seqs, out_prefix, raw_t=0, maxpct_t=None): if maxpct_t:", "np.percentile(filter_seqs[:, seqs_i], 0.1) hmax = np.percentile(filter_seqs[:, seqs_i], 99.9) sns.set(font_scale=0.3) if", "10: # no information filters_ic.append(0) else: # compute and save", "# limitations under the License. # ========================================================================= from __future__ import", "HDF5.') parser.add_option('-o', dest='out_dir', default='.') parser.add_option( '-m', dest='meme_db', default='%s/data/motifs/Homo_sapiens.meme' % os.environ['BASENJIDIR'],", "f)) row_cols = (f, consensus, annotation, filters_ic[f], fmean, fstd) print('%-3d", "ti in range(test_targets.shape[1])] if options.sample is not None: # choose", "in HDF5' ' format.' 
) else: params_file = args[0] model_file", "plot_filter_heat(filter_weights[f, :, :], '%s/filter%d_heat.pdf' % (options.out_dir, f)) # write possum", "[Default: %default]' ) parser.add_option( '-s', dest='sample', default=None, type='int', help='Sample sequences", "pass # normalize nt_sum = float(sum(nt_counts)) nt_freqs = [nt_counts[i] /", "+= 1 # trim PWM of uninformative suffix while trim_end", "fend > len(seqs[i]): kmer += 'N' * (fend - len(seqs[i]))", "of information ################################################################# table_out = open('%s/table.txt' % options.out_dir, 'w') #", "meme_add(meme_out, f, filter_pwm, nsites, trim_filters=False): \"\"\" Print a filter to", "if whiten: dist = 'euclidean' else: dist = 'cosine' plt.figure()", "- bg_gc] ic = 0 for i in range(pwm.shape[0]): for", "filter_names, target_names, out_pdf, seq_op='mean'): num_seqs = filter_outs.shape[0] num_targets = len(target_names)", "0.2 # trim PWM of uninformative prefix ic_start = 0", "# downsample sequences seqs_i = np.random.randint(0, filter_seqs.shape[1], 500) hmin =", "\"License\"); # you may not use this file except in", "+= '_%s' % motif_protein[top_motif] return np.array(filter_names) ################################################################################ # plot_target_corr #", "options.out_dir) plot_filter_seg_heat( filter_outs, '%s/filter_segs_raw.pdf' % options.out_dir, whiten=False) # plot filter-target", "# filter_motif # # Collapse the filter parameter matrix to", "motif. # # Input # param_matrix: np.array of the filter's", "out_prefix, 'w') filter_count = 0 for i in range(filter_outs.shape[0]): for", "G %.4f T %.4f' % tuple(nt_freqs), file=meme_out) print('', file=meme_out) return", "segment into a new instance filter_seqs = np.reshape(np.swapaxes(filter_outs_mean, 2, 1),", "'%s/tomtom/tomtom.txt' % options.out_dir, options.meme_db) ################################################################# # print a table of", "maxpct_t=options.act_t) # make a PWM for the filter filter_pwm, nsites", "looks OK. Still, similar motifs don't cluster quite as well", "= [1 - bg_gc, bg_gc, bg_gc, 1 - bg_gc] ic", "print('LE %d' % (trim_end + 1 - trim_start), file=possum_out) for", "= basenji.seqnn.SeqNN() dr.build(job) print('Model building time %ds' % (time.time() -", "job['fourier']: test_targets_imag = data_open['test_out_imag'] if options.valid: test_targets_imag = data_open['valid_out_imag'] #################################################################", "the meme motif file meme_add(meme_out, f, filter_pwm, nsites, options.trim_filters) meme_out.close()", "parser.parse_args() if len(args) != 3: parser.error( 'Must provide Basenji parameters", "bg_pwm[j]) + pwm[i][j] * np.log2(pseudoc + pwm[i][j]) return ic def", "fi in range(num_filters)] # name by protein if tomtom_file is", "a consensus motif consensus = filter_motif(filter_weights[f, :, :]) # grab", "size=10) plt.savefig(out_pdf) plt.close() ################################################################################ # plot_filter_logo # # Plot a", "plot_target_corr(filter_outs, seq_targets, filter_names, target_names, out_pdf, seq_op='mean'): num_seqs = filter_outs.shape[0] num_targets", "options.out_dir, 'max') def get_motif_proteins(meme_db_file): \"\"\" Hash motif_id's to protein names", "under the License. 
# ========================================================================= from __future__ import print_function from", "of the filter's parameter matrix # out_pdf: ################################################################################ def plot_filter_heat(param_matrix,", "test_targets = data_open['test_out'] try: target_names = list(data_open['target_labels']) except KeyError: target_names", "not None: # choose sampled indexes sample_i = sorted(random.sample(range(test_seqs1.shape[0]), options.sample))", "0] filter_target_cors = np.zeros((len(filter_names_live), num_targets)) for fi in range(len(filter_names_live)): for", "file of positive outputs filter_fasta_out = open('%s.fa' % out_prefix, 'w')", "to the growing MEME file Attrs: meme_out : open file", "filter_outs.shape[1] l = filter_outs.shape[2] s = 5 while l /", "dist = 'euclidean' else: dist = 'cosine' plt.figure() sns.clustermap( filter_seqs[:,", "options.out_dir) # plot filter-segment heatmap plot_filter_seg_heat(filter_outs, '%s/filter_segs.pdf' % options.out_dir) plot_filter_seg_heat(", "weights filter_weights = sess.run(dr.filter_weights[0]) filter_weights = np.transpose(np.squeeze(filter_weights), [2, 1, 0])", "= test_seqs1.shape[2] job['num_targets'] = test_targets.shape[2] job['target_pool'] = int(np.array(data_open.get('pool_width', 1))) t0", "bg_gc, bg_gc, bg_gc, 1 - bg_gc] ic = 0 for", "%.4f G %.4f T %.4f' % tuple(nt_freqs), file=meme_out) print('', file=meme_out)" ]
[ "name=\"payment-choice\"), path('payment/order/<int:pk>/', views.MomoPayment.as_view(), name=\"momo-payment\"), path('payment/momo/<int:pk>/confirm/', views.ConfirmMomoPayment.as_view(), name=\"confirm-momo-payment\"), path('orders/', views.OrderList.as_view(), name=\"order-list\"),", "views.add_product_to_cart_json, name=\"add-product-to-cart-json\"), path('checkout/', views.CheckOut.as_view(), name=\"checkout\"), path('checkout/<int:address_pk>/', views.CheckOut.as_view(), name=\"checkout\"), path('payment/', views.PaymentChoice.as_view(),", "path('shop/', views.ProductListView.as_view(), name=\"product-list\"), path('shop/<int:category_pk>/', views.ProductListView.as_view(), name=\"product-list\"), path('shop/products/<int:pk>/', views.ProductDetailView.as_view(), name=\"product-detail\"), path('cart/',", "views.CheckOut.as_view(), name=\"checkout\"), path('checkout/<int:address_pk>/', views.CheckOut.as_view(), name=\"checkout\"), path('payment/', views.PaymentChoice.as_view(), name=\"payment-choice\"), path('payment/order/<int:pk>/', views.MomoPayment.as_view(),", "path('orders/', views.OrderList.as_view(), name=\"order-list\"), path('orders/<int:pk>/', views.OrderDetail.as_view(), name=\"order-detail\"), path('orders/<int:order_id>/items/<int:pk>/', views.OrderItemDetail.as_view(), name=\"order-item-detail\"), ]", "path('payment/', views.PaymentChoice.as_view(), name=\"payment-choice\"), path('payment/order/<int:pk>/', views.MomoPayment.as_view(), name=\"momo-payment\"), path('payment/momo/<int:pk>/confirm/', views.ConfirmMomoPayment.as_view(), name=\"confirm-momo-payment\"), path('orders/',", "name=\"product-detail\"), path('cart/', views.cart_view, name=\"cart\"), path('cart/add/<int:product_pk>/', views.add_product_to_order, name=\"add-product-to-cart\"), path('cart/add/<int:product_pk>/json/', views.add_product_to_cart_json, name=\"add-product-to-cart-json\"),", "app_name = \"shop\" urlpatterns = [ path('', views.HomePage.as_view(), name=\"home-page\"), path('shop/',", "name=\"checkout\"), path('payment/', views.PaymentChoice.as_view(), name=\"payment-choice\"), path('payment/order/<int:pk>/', views.MomoPayment.as_view(), name=\"momo-payment\"), path('payment/momo/<int:pk>/confirm/', views.ConfirmMomoPayment.as_view(), name=\"confirm-momo-payment\"),", "path('cart/add/<int:product_pk>/', views.add_product_to_order, name=\"add-product-to-cart\"), path('cart/add/<int:product_pk>/json/', views.add_product_to_cart_json, name=\"add-product-to-cart-json\"), path('checkout/', views.CheckOut.as_view(), name=\"checkout\"), path('checkout/<int:address_pk>/',", "= \"shop\" urlpatterns = [ path('', views.HomePage.as_view(), name=\"home-page\"), path('shop/', views.ProductListView.as_view(),", "= [ path('', views.HomePage.as_view(), name=\"home-page\"), path('shop/', views.ProductListView.as_view(), name=\"product-list\"), path('shop/<int:category_pk>/', views.ProductListView.as_view(),", "views app_name = \"shop\" urlpatterns = [ path('', views.HomePage.as_view(), name=\"home-page\"),", "name=\"product-list\"), path('shop/<int:category_pk>/', views.ProductListView.as_view(), name=\"product-list\"), path('shop/products/<int:pk>/', views.ProductDetailView.as_view(), name=\"product-detail\"), path('cart/', views.cart_view, name=\"cart\"),", "views.ProductListView.as_view(), name=\"product-list\"), path('shop/products/<int:pk>/', views.ProductDetailView.as_view(), name=\"product-detail\"), path('cart/', views.cart_view, name=\"cart\"), path('cart/add/<int:product_pk>/', 
views.add_product_to_order,", "name=\"product-list\"), path('shop/products/<int:pk>/', views.ProductDetailView.as_view(), name=\"product-detail\"), path('cart/', views.cart_view, name=\"cart\"), path('cart/add/<int:product_pk>/', views.add_product_to_order, name=\"add-product-to-cart\"),", "views.cart_view, name=\"cart\"), path('cart/add/<int:product_pk>/', views.add_product_to_order, name=\"add-product-to-cart\"), path('cart/add/<int:product_pk>/json/', views.add_product_to_cart_json, name=\"add-product-to-cart-json\"), path('checkout/', views.CheckOut.as_view(),", "[ path('', views.HomePage.as_view(), name=\"home-page\"), path('shop/', views.ProductListView.as_view(), name=\"product-list\"), path('shop/<int:category_pk>/', views.ProductListView.as_view(), name=\"product-list\"),", "name=\"confirm-momo-payment\"), path('orders/', views.OrderList.as_view(), name=\"order-list\"), path('orders/<int:pk>/', views.OrderDetail.as_view(), name=\"order-detail\"), path('orders/<int:order_id>/items/<int:pk>/', views.OrderItemDetail.as_view(), name=\"order-item-detail\"),", "path from . import views app_name = \"shop\" urlpatterns =", "path('shop/products/<int:pk>/', views.ProductDetailView.as_view(), name=\"product-detail\"), path('cart/', views.cart_view, name=\"cart\"), path('cart/add/<int:product_pk>/', views.add_product_to_order, name=\"add-product-to-cart\"), path('cart/add/<int:product_pk>/json/',", "path('cart/', views.cart_view, name=\"cart\"), path('cart/add/<int:product_pk>/', views.add_product_to_order, name=\"add-product-to-cart\"), path('cart/add/<int:product_pk>/json/', views.add_product_to_cart_json, name=\"add-product-to-cart-json\"), path('checkout/',", "path('payment/momo/<int:pk>/confirm/', views.ConfirmMomoPayment.as_view(), name=\"confirm-momo-payment\"), path('orders/', views.OrderList.as_view(), name=\"order-list\"), path('orders/<int:pk>/', views.OrderDetail.as_view(), name=\"order-detail\"), path('orders/<int:order_id>/items/<int:pk>/',", "path('checkout/<int:address_pk>/', views.CheckOut.as_view(), name=\"checkout\"), path('payment/', views.PaymentChoice.as_view(), name=\"payment-choice\"), path('payment/order/<int:pk>/', views.MomoPayment.as_view(), name=\"momo-payment\"), path('payment/momo/<int:pk>/confirm/',", "name=\"add-product-to-cart-json\"), path('checkout/', views.CheckOut.as_view(), name=\"checkout\"), path('checkout/<int:address_pk>/', views.CheckOut.as_view(), name=\"checkout\"), path('payment/', views.PaymentChoice.as_view(), name=\"payment-choice\"),", "views.HomePage.as_view(), name=\"home-page\"), path('shop/', views.ProductListView.as_view(), name=\"product-list\"), path('shop/<int:category_pk>/', views.ProductListView.as_view(), name=\"product-list\"), path('shop/products/<int:pk>/', views.ProductDetailView.as_view(),", "django.urls import path from . import views app_name = \"shop\"", "from . 
import views app_name = \"shop\" urlpatterns = [", "name=\"momo-payment\"), path('payment/momo/<int:pk>/confirm/', views.ConfirmMomoPayment.as_view(), name=\"confirm-momo-payment\"), path('orders/', views.OrderList.as_view(), name=\"order-list\"), path('orders/<int:pk>/', views.OrderDetail.as_view(), name=\"order-detail\"),", "path('cart/add/<int:product_pk>/json/', views.add_product_to_cart_json, name=\"add-product-to-cart-json\"), path('checkout/', views.CheckOut.as_view(), name=\"checkout\"), path('checkout/<int:address_pk>/', views.CheckOut.as_view(), name=\"checkout\"), path('payment/',", "urlpatterns = [ path('', views.HomePage.as_view(), name=\"home-page\"), path('shop/', views.ProductListView.as_view(), name=\"product-list\"), path('shop/<int:category_pk>/',", "name=\"checkout\"), path('checkout/<int:address_pk>/', views.CheckOut.as_view(), name=\"checkout\"), path('payment/', views.PaymentChoice.as_view(), name=\"payment-choice\"), path('payment/order/<int:pk>/', views.MomoPayment.as_view(), name=\"momo-payment\"),", "from django.urls import path from . import views app_name =", "name=\"home-page\"), path('shop/', views.ProductListView.as_view(), name=\"product-list\"), path('shop/<int:category_pk>/', views.ProductListView.as_view(), name=\"product-list\"), path('shop/products/<int:pk>/', views.ProductDetailView.as_view(), name=\"product-detail\"),", "views.ConfirmMomoPayment.as_view(), name=\"confirm-momo-payment\"), path('orders/', views.OrderList.as_view(), name=\"order-list\"), path('orders/<int:pk>/', views.OrderDetail.as_view(), name=\"order-detail\"), path('orders/<int:order_id>/items/<int:pk>/', views.OrderItemDetail.as_view(),", ". import views app_name = \"shop\" urlpatterns = [ path('',", "path('payment/order/<int:pk>/', views.MomoPayment.as_view(), name=\"momo-payment\"), path('payment/momo/<int:pk>/confirm/', views.ConfirmMomoPayment.as_view(), name=\"confirm-momo-payment\"), path('orders/', views.OrderList.as_view(), name=\"order-list\"), path('orders/<int:pk>/',", "import views app_name = \"shop\" urlpatterns = [ path('', views.HomePage.as_view(),", "path('', views.HomePage.as_view(), name=\"home-page\"), path('shop/', views.ProductListView.as_view(), name=\"product-list\"), path('shop/<int:category_pk>/', views.ProductListView.as_view(), name=\"product-list\"), path('shop/products/<int:pk>/',", "name=\"cart\"), path('cart/add/<int:product_pk>/', views.add_product_to_order, name=\"add-product-to-cart\"), path('cart/add/<int:product_pk>/json/', views.add_product_to_cart_json, name=\"add-product-to-cart-json\"), path('checkout/', views.CheckOut.as_view(), name=\"checkout\"),", "views.CheckOut.as_view(), name=\"checkout\"), path('payment/', views.PaymentChoice.as_view(), name=\"payment-choice\"), path('payment/order/<int:pk>/', views.MomoPayment.as_view(), name=\"momo-payment\"), path('payment/momo/<int:pk>/confirm/', views.ConfirmMomoPayment.as_view(),", "views.MomoPayment.as_view(), name=\"momo-payment\"), path('payment/momo/<int:pk>/confirm/', views.ConfirmMomoPayment.as_view(), name=\"confirm-momo-payment\"), path('orders/', views.OrderList.as_view(), name=\"order-list\"), path('orders/<int:pk>/', views.OrderDetail.as_view(),", "\"shop\" urlpatterns = [ path('', views.HomePage.as_view(), name=\"home-page\"), path('shop/', views.ProductListView.as_view(), name=\"product-list\"),", "name=\"add-product-to-cart\"), path('cart/add/<int:product_pk>/json/', views.add_product_to_cart_json, name=\"add-product-to-cart-json\"), path('checkout/', views.CheckOut.as_view(), 
name=\"checkout\"), path('checkout/<int:address_pk>/', views.CheckOut.as_view(), name=\"checkout\"),", "path('checkout/', views.CheckOut.as_view(), name=\"checkout\"), path('checkout/<int:address_pk>/', views.CheckOut.as_view(), name=\"checkout\"), path('payment/', views.PaymentChoice.as_view(), name=\"payment-choice\"), path('payment/order/<int:pk>/',", "views.ProductDetailView.as_view(), name=\"product-detail\"), path('cart/', views.cart_view, name=\"cart\"), path('cart/add/<int:product_pk>/', views.add_product_to_order, name=\"add-product-to-cart\"), path('cart/add/<int:product_pk>/json/', views.add_product_to_cart_json,", "views.ProductListView.as_view(), name=\"product-list\"), path('shop/<int:category_pk>/', views.ProductListView.as_view(), name=\"product-list\"), path('shop/products/<int:pk>/', views.ProductDetailView.as_view(), name=\"product-detail\"), path('cart/', views.cart_view,", "import path from . import views app_name = \"shop\" urlpatterns", "views.PaymentChoice.as_view(), name=\"payment-choice\"), path('payment/order/<int:pk>/', views.MomoPayment.as_view(), name=\"momo-payment\"), path('payment/momo/<int:pk>/confirm/', views.ConfirmMomoPayment.as_view(), name=\"confirm-momo-payment\"), path('orders/', views.OrderList.as_view(),", "path('shop/<int:category_pk>/', views.ProductListView.as_view(), name=\"product-list\"), path('shop/products/<int:pk>/', views.ProductDetailView.as_view(), name=\"product-detail\"), path('cart/', views.cart_view, name=\"cart\"), path('cart/add/<int:product_pk>/',", "views.add_product_to_order, name=\"add-product-to-cart\"), path('cart/add/<int:product_pk>/json/', views.add_product_to_cart_json, name=\"add-product-to-cart-json\"), path('checkout/', views.CheckOut.as_view(), name=\"checkout\"), path('checkout/<int:address_pk>/', views.CheckOut.as_view()," ]
[ "0.8, 0.9, 0.95, 0.99, 0.999, 0.9999] self.param_names = ['alpha', 'beta',", "import integrate from scipy.optimize import minimize from surpyval import parametric", "* np.exp(-(x/alpha)**beta) def hf(self, x, alpha, beta, mu): r\"\"\" Instantaneous", "value(s) of the instantaneous hazard rate at x. Examples --------", "percentiles at which the quantile will be calculated alpha :", "h(x) = \\frac{f(x)}{R(x)} Parameters ---------- x : numpy array or", ".fitters.mpp import mpp class ExpoWeibull_(ParametricFitter): def __init__(self, name): self.name =", "5]) >>> ExpoWeibull.Hf(x, 3, 4, 1.2) array([5.10166141e-03, 1.35931416e-01, 8.59705336e-01, 2.98247086e+00,", "1. else: return alpha, beta, 1. def sf(self, x, alpha,", "function for the ExpoWeibull Distribution: .. math:: R(x) = 1", "array or scalar shape parameter for the ExpoWeibull distribution mu", "the reliability function at x. Examples -------- >>> import numpy", "- np.min(x))/10. return gamma, alpha, beta, 1. else: return alpha,", "self.sf(x + X, alpha, beta, mu) / self.sf(X, alpha, beta,", "return (beta * mu * x**(beta - 1)) / (alpha**beta)", "3, 4, 5]) >>> ExpoWeibull.Hf(x, 3, 4, 1.2) array([5.10166141e-03, 1.35931416e-01,", "Q : scalar or numpy array The quantiles for the", "- 1} \\left [ 1 - e^{-\\left ( \\frac{x}{\\alpha} \\right", "Instantaneous hazard rate for the ExpoWeibull Distribution: .. math:: q(p)", "alpha)**beta), mu) def cs(self, x, X, alpha, beta, mu): r\"\"\"", "import parametric as para from surpyval import nonparametric as nonp", "np.array([1, 2, 3, 4, 5]) >>> ExpoWeibull.ff(x, 3, 4, 1.2)", "mu) def df(self, x, alpha, beta, mu): r\"\"\" Density function", "3, 4, 1.2) array([5.10166141e-03, 1.35931416e-01, 8.59705336e-01, 2.98247086e+00, 7.53377239e+00]) \"\"\" return", "r\"\"\" Instantaneous hazard rate for the ExpoWeibull Distribution: .. math::", "2, 3, 4, 5]) >>> ExpoWeibull.Hf(x, 3, 4, 1.2) array([5.10166141e-03,", ">>> ExpoWeibull.sf(x, 3, 4, 1.2) array([9.94911330e-01, 8.72902497e-01, 4.23286791e-01, 5.06674866e-02, 5.34717283e-04])", "1.2) array([9.94911330e-01, 8.72902497e-01, 4.23286791e-01, 5.06674866e-02, 5.34717283e-04]) \"\"\" return 1 -", "np.min(x))/10. return gamma, alpha, beta, 1. else: return alpha, beta,", "at x. Examples -------- >>> import numpy as np >>>", "self.df(x, alpha, beta, mu) / self.sf(x, alpha, beta, mu) def", "distribution beta : numpy array or scalar shape parameter for", "array The value(s) of the reliability function at x. Examples", "beta, 1. 
def sf(self, x, alpha, beta, mu): r\"\"\" Survival", "x = np.array([1, 2, 3, 4, 5]) >>> ExpoWeibull.sf(x, 3,", "- np.exp(-(x/alpha)**beta))**(mu - 1) \\ * np.exp(-(x/alpha)**beta) def hf(self, x,", "Hf : scalar or numpy array The value(s) of the", ": scalar or numpy array The value(s) of the density", "array([5.10166141e-03, 1.35931416e-01, 8.59705336e-01, 2.98247086e+00, 7.53377239e+00]) \"\"\" return -np.log(self.sf(x, alpha, beta,", "params, rr): #UPDATE ME if rr == 'y': beta =", "alpha = np.exp(params[1] / (beta * params[0])) return alpha, beta,", "mpp_inv_y_transform(self, y, *params): i = len(params) mu = params[i-1] return", "surpyval import nonparametric as nonp from surpyval.parametric.parametric_fitter import ParametricFitter from", "\"\"\" return (beta * mu * x**(beta - 1)) /", ".2, .3, .4, .5]) >>> ExpoWeibull.qf(p, 3, 4, 1.2) array([1.89361341,", "np.isnan(alpha)): alpha = np.median(x) if (np.isinf(beta) | np.isnan(beta)): beta =", "scalar The values at which the function will be calculated", "3, 4, 1.2) array([8.77367129e-01, 4.25451775e-01, 5.09266354e-02, 5.37452200e-04, 1.35732908e-07]) \"\"\" return", "for the ExpoWeibull distribution beta : numpy array or scalar", "parameter for the ExpoWeibull distribution Returns ------- Hf : scalar", "distribution Returns ------- df : scalar or numpy array The", "or numpy array The quantiles for the Weibull distribution at", "self.support = (0, np.inf) self.plot_x_scale = 'log' self.y_ticks = [0.0001,", "mu): r\"\"\" Instantaneous hazard rate for the ExpoWeibull Distribution: ..", "self.qf(0.999, alpha, beta, mu) return integrate.quadrature(func, 0, top)[0] def random(self,", "0.02, 0.03, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7,", "the ExpoWeibull Distribution: .. math:: h(x) = \\frac{f(x)}{R(x)} Parameters ----------", "or scalar shape parameter for the ExpoWeibull distribution mu :", ">>> ExpoWeibull.Hf(x, 3, 4, 1.2) array([5.10166141e-03, 1.35931416e-01, 8.59705336e-01, 2.98247086e+00, 7.53377239e+00])", "name self.k = 3 self.bounds = ((0, None), (0, None),", "mu) / self.sf(x, alpha, beta, mu) def Hf(self, x, alpha,", "numpy import euler_gamma from scipy.special import gamma as gamma_func from", "0.6, 0.7, 0.8, 0.9, 0.95, 0.99, 0.999, 0.9999] self.param_names =", "at which the function will be calculated alpha : numpy", "minimize from surpyval import parametric as para from surpyval import", "size, alpha, beta, mu): U = uniform.rvs(size=size) return self.qf(U, alpha,", "Parameters ---------- p : numpy array or scalar The percentiles", "X)}{R(X)} Parameters ---------- x : numpy array or scalar The", "* (-np.log(1 - p**(1./mu)))**(1/beta) def mean(self, alpha, beta, mu): func", "alpha, beta, mu) top = 2 * self.qf(0.999, alpha, beta,", "'beta' : 1, 'mu' : 2 } def _parameter_initialiser(self, x,", "from scipy.special import gamma as gamma_func from scipy.special import ndtri", "beta, mu) / self.sf(x, alpha, beta, mu) def Hf(self, x,", "shape parameter for the ExpoWeibull distribution Returns ------- df :", "4.23286791e-01, 5.06674866e-02, 5.34717283e-04]) \"\"\" return 1 - np.power(1 - np.exp(-(x", "as gamma_func from scipy.special import ndtri as z from scipy", "np.array([1, 2, 3, 4, 5]) >>> ExpoWeibull.sf(x, 1, 3, 4,", "4, 1.2) array([5.10166141e-03, 1.35931416e-01, 8.59705336e-01, 2.98247086e+00, 7.53377239e+00]) \"\"\" return -np.log(self.sf(x,", "gumb.res.success: gumb = para.Gumbel.fit(log_x, c, n, how='MPP') mu, sigma =", "0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.95, 0.99,", ": 0, 'beta' : 1, 'mu' : 2 } def", "1. 
if offset: gamma = np.min(x) - (np.max(x) - np.min(x))/10.", "x : x * self.df(x, alpha, beta, mu) top =", "0, 'beta' : 1, 'mu' : 2 } def _parameter_initialiser(self,", "will be calculated alpha : numpy array or scalar scale", "X) = \\frac{R(x + X)}{R(X)} Parameters ---------- x : numpy", "( \\frac{x}{\\alpha} \\right )^\\beta} \\right ]^{\\mu - 1} e^{- \\left", "as nonp from surpyval.parametric.parametric_fitter import ParametricFitter from .fitters.mpp import mpp", "math:: f(x) = \\mu \\left ( \\frac{\\beta}{\\alpha} \\right ) \\left", "1} \\left [ 1 - e^{-\\left ( \\frac{x}{\\alpha} \\right )^\\beta}", "self.plot_x_scale = 'log' self.y_ticks = [0.0001, 0.0002, 0.0003, 0.001, 0.002,", "'y': beta = params[0] alpha = np.exp(params[1]/-beta) elif rr ==", "alpha, beta, mu) def Hf(self, x, alpha, beta, mu): r\"\"\"", "- (np.max(x) - np.min(x))/10. return gamma, alpha, beta, 1. else:", "*params): mu = params[-1] mask = ((y == 0) |", "= np.array([.1, .2, .3, .4, .5]) >>> ExpoWeibull.qf(p, 3, 4,", "\\right ) \\left ( \\frac{x}{\\alpha} \\right )^{\\beta - 1} \\left", "array([1.89361341, 2.2261045 , 2.46627621, 2.66992747, 2.85807988]) \"\"\" return alpha *", "from scipy import integrate from scipy.optimize import minimize from surpyval", "rate for the ExpoWeibull Distribution: .. math:: h(x) = \\frac{f(x)}{R(x)}", "Failure (CDF or unreliability) function for the ExpoWeibull Distribution: ..", "= np.array([1, 2, 3, 4, 5]) >>> ExpoWeibull.sf(x, 1, 3,", "scipy import integrate from scipy.optimize import minimize from surpyval import", "= [0.0001, 0.0002, 0.0003, 0.001, 0.002, 0.003, 0.005, 0.01, 0.02,", "z from scipy import integrate from scipy.optimize import minimize from", "beta, mu) top = 2 * self.qf(0.999, alpha, beta, mu)", "== 0) | (y == 1)) out = np.zeros_like(y) out[~mask]", "array([8.77367129e-01, 4.25451775e-01, 5.09266354e-02, 5.37452200e-04, 1.35732908e-07]) \"\"\" return self.sf(x + X,", "- e^{-\\left ( \\frac{x}{\\alpha} \\right )^\\beta} \\right ]^{\\mu - 1}", "4, 5]) >>> ExpoWeibull.ff(x, 3, 4, 1.2) array([0.00508867, 0.1270975 ,", "if rr == 'y': beta = params[0] alpha = np.exp(params[1]/-beta)", "the ExpoWeibull Distribution: .. math:: H(x) = -\\ln \\left (", "------- df : scalar or numpy array The value(s) of", "p, alpha, beta, mu): r\"\"\" Instantaneous hazard rate for the", "quantiles for the Weibull distribution at each value p Examples", "_parameter_initialiser(self, x, c=None, n=None, offset=False): log_x = np.log(x) log_x[np.isnan(log_x)] =", "for the Weibull distribution at each value p Examples --------", "scalar or numpy array The value(s) of the reliability function", "gamma=0): return np.log(x - gamma) def mpp_y_transform(self, y, *params): mu", ">>> ExpoWeibull.ff(x, 3, 4, 1.2) array([0.00508867, 0.1270975 , 0.57671321, 0.94933251,", "The value(s) of the density function at x. Examples --------", "array The value(s) of the instantaneous hazard rate at x.", "0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9,", "np.exp(mu), 1. 
/ sigma if (np.isinf(alpha) | np.isnan(alpha)): alpha =", ">>> ExpoWeibull.qf(p, 3, 4, 1.2) array([1.89361341, 2.2261045 , 2.46627621, 2.66992747,", "return np.log(x - gamma) def mpp_y_transform(self, y, *params): mu =", "random(self, size, alpha, beta, mu): U = uniform.rvs(size=size) return self.qf(U,", "alpha, beta, mu): r\"\"\" Conditional survival (or reliability) function for", "for the ExpoWeibull distribution Returns ------- Q : scalar or", "r\"\"\" Survival (or reliability) function for the ExpoWeibull Distribution: ..", "cumulative hazard rate at x. Examples -------- >>> import numpy", "Distribution: .. math:: f(x) = \\mu \\left ( \\frac{\\beta}{\\alpha} \\right", "x = np.array([1, 2, 3, 4, 5]) >>> ExpoWeibull.Hf(x, 3,", "\"\"\" return -np.log(self.sf(x, alpha, beta, mu)) def qf(self, p, alpha,", "6.17256436]) \"\"\" return self.df(x, alpha, beta, mu) / self.sf(x, alpha,", "def random(self, size, alpha, beta, mu): U = uniform.rvs(size=size) return", "ExpoWeibull Distribution: .. math:: R(x) = 1 - \\left [", "(0, None), (0, None),) self.support = (0, np.inf) self.plot_x_scale =", "0, top)[0] def random(self, size, alpha, beta, mu): U =", "3 self.bounds = ((0, None), (0, None), (0, None),) self.support", "alpha, beta, mu): r\"\"\" Instantaneous hazard rate for the ExpoWeibull", "ExpoWeibull.hf(x, 3, 4, 1.2) array([0.02439931, 0.3160701 , 1.26867613, 3.14672068, 6.17256436])", "at which the quantile will be calculated alpha : numpy", "(-np.log(1 - p**(1./mu)))**(1/beta) def mean(self, alpha, beta, mu): func =", "r\"\"\" Failure (CDF or unreliability) function for the ExpoWeibull Distribution:", "0.99946528]) \"\"\" return np.power(1 - np.exp(-(x / alpha)**beta), mu) def", "'mu'] self.param_map = { 'alpha' : 0, 'beta' : 1,", "top)[0] def random(self, size, alpha, beta, mu): U = uniform.rvs(size=size)", ">>> from surpyval import ExpoWeibull >>> x = np.array([1, 2,", "= Parameters ---------- p : numpy array or scalar The", "the ExpoWeibull Distribution: .. math:: F(x) = \\left [ 1", "log_x[np.isnan(log_x)] = 0 gumb = para.Gumbel.fit(log_x, c, n, how='MLE') if", "the instantaneous hazard rate at x. Examples -------- >>> import", "\"\"\" return self.df(x, alpha, beta, mu) / self.sf(x, alpha, beta,", "mu * x**(beta - 1)) / (alpha**beta) \\ * (1", "= 1. if offset: gamma = np.min(x) - (np.max(x) -", "self.qf(U, alpha, beta, mu) def mpp_x_transform(self, x, gamma=0): return np.log(x", "import ndtri as z from scipy import integrate from scipy.optimize", "ExpoWeibull distribution mu : numpy array or scalar shape parameter", "2.66992747, 2.85807988]) \"\"\" return alpha * (-np.log(1 - p**(1./mu)))**(1/beta) def", "parameter for the ExpoWeibull distribution Returns ------- hf : scalar", ".. math:: h(x) = \\frac{f(x)}{R(x)} Parameters ---------- x : numpy", "offset: gamma = np.min(x) - (np.max(x) - np.min(x))/10. return gamma,", "= np.median(x) if (np.isinf(beta) | np.isnan(beta)): beta = 1. if", "Distribution: .. math:: F(x) = \\left [ 1 - e^{-\\left", "1) \\ * np.exp(-(x/alpha)**beta) def hf(self, x, alpha, beta, mu):", "mask = ((y == 0) | (y == 1)) out", "mu): r\"\"\" Density function for the ExpoWeibull Distribution: .. math::", "= gumb.params alpha, beta = np.exp(mu), 1. 
/ sigma if", "= \\frac{f(x)}{R(x)} Parameters ---------- x : numpy array or scalar", "self.sf(x, alpha, beta, mu) def Hf(self, x, alpha, beta, mu):", "from surpyval import ExpoWeibull >>> x = np.array([1, 2, 3,", "return -np.log(self.sf(x, alpha, beta, mu)) def qf(self, p, alpha, beta,", "the Weibull distribution at each value p Examples -------- >>>", "as np >>> from surpyval import ExpoWeibull >>> p =", "np.log(x) log_x[np.isnan(log_x)] = 0 gumb = para.Gumbel.fit(log_x, c, n, how='MLE')", "x * self.df(x, alpha, beta, mu) top = 2 *", "import uniform from autograd import jacobian from numpy import euler_gamma", "np.nan return out def mpp_inv_y_transform(self, y, *params): i = len(params)", "x = np.array([1, 2, 3, 4, 5]) >>> ExpoWeibull.df(x, 3,", "Returns ------- Q : scalar or numpy array The quantiles", "self.bounds = ((0, None), (0, None), (0, None),) self.support =", "c, n, how='MLE') if not gumb.res.success: gumb = para.Gumbel.fit(log_x, c,", "np.array([1, 2, 3, 4, 5]) >>> ExpoWeibull.sf(x, 3, 4, 1.2)", "alpha, beta, mu): r\"\"\" Density function for the ExpoWeibull Distribution:", "np.power(1 - np.exp(-(x / alpha)**beta), mu) def cs(self, x, X,", "e^{- \\left ( \\frac{x}{\\alpha} \\right )^\\beta} Parameters ---------- x :", "= params[-1] mask = ((y == 0) | (y ==", "autograd import jacobian from numpy import euler_gamma from scipy.special import", "0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.95, 0.99, 0.999,", "params[-1] mask = ((y == 0) | (y == 1))", "------- hf : scalar or numpy array The value(s) of", "top = 2 * self.qf(0.999, alpha, beta, mu) return integrate.quadrature(func,", "mpp_y_transform(self, y, *params): mu = params[-1] mask = ((y ==", "math:: R(x, X) = \\frac{R(x + X)}{R(X)} Parameters ---------- x", "from scipy.stats import uniform from autograd import jacobian from numpy", "0.001, 0.002, 0.003, 0.005, 0.01, 0.02, 0.03, 0.05, 0.1, 0.2,", "as np >>> from surpyval import ExpoWeibull >>> x =", "the failure function at x. Examples -------- >>> import numpy", "1, 3, 4, 1.2) array([8.77367129e-01, 4.25451775e-01, 5.09266354e-02, 5.37452200e-04, 1.35732908e-07]) \"\"\"", "for the ExpoWeibull distribution Returns ------- df : scalar or", "the ExpoWeibull distribution Returns ------- Q : scalar or numpy", "scalar or numpy array The value(s) of the density function", "= np.nan return out def mpp_inv_y_transform(self, y, *params): i =", "1 - \\left [ 1 - e^{-\\left ( \\frac{x}{\\alpha} \\right", "np.power(1 - np.exp(-(x / alpha)**beta), mu) def ff(self, x, alpha,", "ME if rr == 'y': beta = params[0] alpha =", "function for the ExpoWeibull Distribution: .. math:: R(x, X) =", "numpy array or scalar The percentiles at which the quantile", "/ sigma if (np.isinf(alpha) | np.isnan(alpha)): alpha = np.median(x) if", "or scalar scale parameter for the ExpoWeibull distribution beta :", "math:: q(p) = Parameters ---------- p : numpy array or", "The values at which the function will be calculated alpha", "of the failure function at x. 
Examples -------- >>> import", ">>> ExpoWeibull.sf(x, 1, 3, 4, 1.2) array([8.77367129e-01, 4.25451775e-01, 5.09266354e-02, 5.37452200e-04,", "- 1) \\ * np.exp(-(x/alpha)**beta) def hf(self, x, alpha, beta,", "def mpp_inv_y_transform(self, y, *params): i = len(params) mu = params[i-1]", "1.26867613, 3.14672068, 6.17256436]) \"\"\" return self.df(x, alpha, beta, mu) /", "np.array([1, 2, 3, 4, 5]) >>> ExpoWeibull.df(x, 3, 4, 1.2)", "x, alpha, beta, mu): r\"\"\" Survival (or reliability) function for", "mu) def Hf(self, x, alpha, beta, mu): r\"\"\" Instantaneous hazard", "def df(self, x, alpha, beta, mu): r\"\"\" Density function for", "mu = params[i-1] return (1 - np.exp(-np.exp(y)))**mu def unpack_rr(self, params,", "def _parameter_initialiser(self, x, c=None, n=None, offset=False): log_x = np.log(x) log_x[np.isnan(log_x)]", ") Parameters ---------- x : numpy array or scalar The", "return integrate.quadrature(func, 0, top)[0] def random(self, size, alpha, beta, mu):", "scalar shape parameter for the ExpoWeibull distribution Returns ------- Hf", "the function will be calculated alpha : numpy array or", "euler_gamma from scipy.special import gamma as gamma_func from scipy.special import", "the ExpoWeibull distribution Returns ------- Hf : scalar or numpy", "alpha, beta, mu): r\"\"\" Failure (CDF or unreliability) function for", "5]) >>> ExpoWeibull.hf(x, 3, 4, 1.2) array([0.02439931, 0.3160701 , 1.26867613,", "uniform.rvs(size=size) return self.qf(U, alpha, beta, mu) def mpp_x_transform(self, x, gamma=0):", "function for the ExpoWeibull Distribution: .. math:: F(x) = \\left", "ExpoWeibull.sf(x, 3, 4, 1.2) array([9.94911330e-01, 8.72902497e-01, 4.23286791e-01, 5.06674866e-02, 5.34717283e-04]) \"\"\"", "array or scalar scale parameter for the ExpoWeibull distribution beta", "density function at x. Examples -------- >>> import numpy as", "shape parameter for the ExpoWeibull distribution Returns ------- Hf :", "x, alpha, beta, mu): r\"\"\" Density function for the ExpoWeibull", "self.sf(X, alpha, beta, mu) def df(self, x, alpha, beta, mu):", "mu : numpy array or scalar shape parameter for the", "hazard rate for the ExpoWeibull Distribution: .. math:: q(p) =", ")^{\\beta - 1} \\left [ 1 - e^{-\\left ( \\frac{x}{\\alpha}", "== 1)) out = np.zeros_like(y) out[~mask] = np.log(-np.log((1 - y[~mask]**(1./mu))))", "import ExpoWeibull >>> p = np.array([.1, .2, .3, .4, .5])", "( \\frac{x}{\\alpha} \\right )^{\\beta - 1} \\left [ 1 -", "from scipy.special import ndtri as z from scipy import integrate", "]^{\\mu} Parameters ---------- x : numpy array or scalar The", "failure function at x. 
Examples -------- >>> import numpy as", "distribution Returns ------- Hf : scalar or numpy array The", "alpha, beta, mu): U = uniform.rvs(size=size) return self.qf(U, alpha, beta,", "1.2) array([0.00508867, 0.1270975 , 0.57671321, 0.94933251, 0.99946528]) \"\"\" return np.power(1", "\\right )^{\\beta - 1} \\left [ 1 - e^{-\\left (", "\\frac{R(x + X)}{R(X)} Parameters ---------- x : numpy array or", "beta = params[0] alpha = np.exp(params[1]/-beta) elif rr == 'x':", "def __init__(self, name): self.name = name self.k = 3 self.bounds", "4, 5]) >>> ExpoWeibull.hf(x, 3, 4, 1.2) array([0.02439931, 0.3160701 ,", "np.exp(-(x/alpha)**beta) def hf(self, x, alpha, beta, mu): r\"\"\" Instantaneous hazard", "x = np.array([1, 2, 3, 4, 5]) >>> ExpoWeibull.hf(x, 3,", "len(params) mu = params[i-1] return (1 - np.exp(-np.exp(y)))**mu def unpack_rr(self,", "\\right ]^{\\mu} Parameters ---------- x : numpy array or scalar", "params[0] alpha = np.exp(params[1]/-beta) elif rr == 'x': beta =", "- 1} e^{- \\left ( \\frac{x}{\\alpha} \\right )^\\beta} Parameters ----------", "def sf(self, x, alpha, beta, mu): r\"\"\" Survival (or reliability)", "3, 4, 5]) >>> ExpoWeibull.sf(x, 3, 4, 1.2) array([9.94911330e-01, 8.72902497e-01,", "value(s) of the reliability function at x. Examples -------- >>>", "e^{-\\left ( \\frac{x}{\\alpha} \\right )^\\beta} \\right ]^{\\mu - 1} e^{-", "or numpy array The value(s) of the density function at", ")^\\beta} \\right ]^{\\mu} Parameters ---------- x : numpy array or", "sf : scalar or numpy array The value(s) of the", "2, 3, 4, 5]) >>> ExpoWeibull.df(x, 3, 4, 1.2) array([0.02427515,", "alpha, beta = np.exp(mu), 1. / sigma if (np.isinf(alpha) |", "return np.power(1 - np.exp(-(x / alpha)**beta), mu) def cs(self, x,", "array The quantiles for the Weibull distribution at each value", "Hf(self, x, alpha, beta, mu): r\"\"\" Instantaneous hazard rate for", "The value(s) of the failure function at x. 
Examples --------", "- gamma) def mpp_y_transform(self, y, *params): mu = params[-1] mask", "= -\\ln \\left ( R(x) \\right ) Parameters ---------- x", "or scalar The percentiles at which the quantile will be", "ExpoWeibull >>> p = np.array([.1, .2, .3, .4, .5]) >>>", "gumb = para.Gumbel.fit(log_x, c, n, how='MPP') mu, sigma = gumb.params", "shape parameter for the ExpoWeibull distribution Returns ------- sf :", ": numpy array or scalar The values at which the", "- \\left [ 1 - e^{-\\left ( \\frac{x}{\\alpha} \\right )^\\beta}", "= np.array([1, 2, 3, 4, 5]) >>> ExpoWeibull.df(x, 3, 4,", "array The value(s) of the cumulative hazard rate at x.", "scalar shape parameter for the ExpoWeibull distribution Returns ------- Q", "function will be calculated alpha : numpy array or scalar", "ExpoWeibull.sf(x, 1, 3, 4, 1.2) array([8.77367129e-01, 4.25451775e-01, 5.09266354e-02, 5.37452200e-04, 1.35732908e-07])", "5.09266354e-02, 5.37452200e-04, 1.35732908e-07]) \"\"\" return self.sf(x + X, alpha, beta,", "self.df(x, alpha, beta, mu) top = 2 * self.qf(0.999, alpha,", "- e^{-\\left ( \\frac{x}{\\alpha} \\right )^\\beta} \\right ]^{\\mu} Parameters ----------", "\\right ]^{\\mu - 1} e^{- \\left ( \\frac{x}{\\alpha} \\right )^\\beta}", "numpy array The value(s) of the density function at x.", "self.y_ticks = [0.0001, 0.0002, 0.0003, 0.001, 0.002, 0.003, 0.005, 0.01,", "ExpoWeibull >>> x = np.array([1, 2, 3, 4, 5]) >>>", "integrate.quadrature(func, 0, top)[0] def random(self, size, alpha, beta, mu): U", "np >>> from surpyval import ExpoWeibull >>> p = np.array([.1,", ">>> ExpoWeibull.df(x, 3, 4, 1.2) array([0.02427515, 0.27589838, 0.53701385, 0.15943643, 0.00330058])", ">>> x = np.array([1, 2, 3, 4, 5]) >>> ExpoWeibull.df(x,", "sigma = gumb.params alpha, beta = np.exp(mu), 1. / sigma", "return self.df(x, alpha, beta, mu) / self.sf(x, alpha, beta, mu)", "numpy array The value(s) of the reliability function at x.", "parameter for the ExpoWeibull distribution mu : numpy array or", "0.03, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8,", "\\frac{x}{\\alpha} \\right )^\\beta} \\right ]^{\\mu} Parameters ---------- x : numpy", "Instantaneous hazard rate for the ExpoWeibull Distribution: .. math:: h(x)", "array([0.02439931, 0.3160701 , 1.26867613, 3.14672068, 6.17256436]) \"\"\" return self.df(x, alpha,", "def Hf(self, x, alpha, beta, mu): r\"\"\" Instantaneous hazard rate", ".. math:: H(x) = -\\ln \\left ( R(x) \\right )", "mu) return integrate.quadrature(func, 0, top)[0] def random(self, size, alpha, beta,", "\\frac{x}{\\alpha} \\right )^\\beta} \\right ]^{\\mu - 1} e^{- \\left (", "the cumulative hazard rate at x. 
Examples -------- >>> import", "parametric as para from surpyval import nonparametric as nonp from", "beta, mu): r\"\"\" Conditional survival (or reliability) function for the", "the ExpoWeibull distribution beta : numpy array or scalar shape", "alpha, beta, mu): func = lambda x : x *", "---------- x : numpy array or scalar The values at", "= np.log(x) log_x[np.isnan(log_x)] = 0 gumb = para.Gumbel.fit(log_x, c, n,", "params[i-1] return (1 - np.exp(-np.exp(y)))**mu def unpack_rr(self, params, rr): #UPDATE", "def unpack_rr(self, params, rr): #UPDATE ME if rr == 'y':", "np from scipy.stats import uniform from autograd import jacobian from", "if (np.isinf(alpha) | np.isnan(alpha)): alpha = np.median(x) if (np.isinf(beta) |", "[ 1 - e^{-\\left ( \\frac{x}{\\alpha} \\right )^\\beta} \\right ]^{\\mu}", "rr == 'x': beta = 1./params[0] alpha = np.exp(params[1] /", "beta : numpy array or scalar shape parameter for the", "mpp_x_transform(self, x, gamma=0): return np.log(x - gamma) def mpp_y_transform(self, y,", "beta, mu): r\"\"\" Density function for the ExpoWeibull Distribution: ..", ">>> x = np.array([1, 2, 3, 4, 5]) >>> ExpoWeibull.sf(x,", "r\"\"\" Density function for the ExpoWeibull Distribution: .. math:: f(x)", "4, 5]) >>> ExpoWeibull.df(x, 3, 4, 1.2) array([0.02427515, 0.27589838, 0.53701385,", "array([0.00508867, 0.1270975 , 0.57671321, 0.94933251, 0.99946528]) \"\"\" return np.power(1 -", "def ff(self, x, alpha, beta, mu): r\"\"\" Failure (CDF or", "def cs(self, x, X, alpha, beta, mu): r\"\"\" Conditional survival", "Weibull distribution at each value p Examples -------- >>> import", "0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.95, 0.99, 0.999, 0.9999]", "1)) out = np.zeros_like(y) out[~mask] = np.log(-np.log((1 - y[~mask]**(1./mu)))) out[mask]", "'beta', 'mu'] self.param_map = { 'alpha' : 0, 'beta' :", "quantile will be calculated alpha : numpy array or scalar", "- np.exp(-np.exp(y)))**mu def unpack_rr(self, params, rr): #UPDATE ME if rr", "]^{\\mu - 1} e^{- \\left ( \\frac{x}{\\alpha} \\right )^\\beta} Parameters", "= para.Gumbel.fit(log_x, c, n, how='MPP') mu, sigma = gumb.params alpha,", "ExpoWeibull distribution beta : numpy array or scalar shape parameter", "the ExpoWeibull distribution Returns ------- sf : scalar or numpy", "3, 4, 5]) >>> ExpoWeibull.sf(x, 1, 3, 4, 1.2) array([8.77367129e-01,", "distribution Returns ------- Q : scalar or numpy array The", "ExpoWeibull Distribution: .. math:: f(x) = \\mu \\left ( \\frac{\\beta}{\\alpha}", "from numpy import euler_gamma from scipy.special import gamma as gamma_func", ": scalar or numpy array The value(s) of the failure", "x**(beta - 1)) / (alpha**beta) \\ * (1 - np.exp(-(x/alpha)**beta))**(mu", "5.37452200e-04, 1.35732908e-07]) \"\"\" return self.sf(x + X, alpha, beta, mu)", "(y == 1)) out = np.zeros_like(y) out[~mask] = np.log(-np.log((1 -", "3, 4, 1.2) array([0.00508867, 0.1270975 , 0.57671321, 0.94933251, 0.99946528]) \"\"\"", "from surpyval.parametric.parametric_fitter import ParametricFitter from .fitters.mpp import mpp class ExpoWeibull_(ParametricFitter):", "out = np.zeros_like(y) out[~mask] = np.log(-np.log((1 - y[~mask]**(1./mu)))) out[mask] =", "H(x) = -\\ln \\left ( R(x) \\right ) Parameters ----------", "array The value(s) of the density function at x. 
Examples", "sigma if (np.isinf(alpha) | np.isnan(alpha)): alpha = np.median(x) if (np.isinf(beta)", "if not gumb.res.success: gumb = para.Gumbel.fit(log_x, c, n, how='MPP') mu,", "mu = params[-1] mask = ((y == 0) | (y", "= np.array([1, 2, 3, 4, 5]) >>> ExpoWeibull.sf(x, 3, 4,", "scipy.optimize import minimize from surpyval import parametric as para from", "3, 4, 1.2) array([0.02427515, 0.27589838, 0.53701385, 0.15943643, 0.00330058]) \"\"\" return", "the quantile will be calculated alpha : numpy array or", "1)) / (alpha**beta) \\ * (1 - np.exp(-(x/alpha)**beta))**(mu - 1)", "from autograd import jacobian from numpy import euler_gamma from scipy.special", "------- Q : scalar or numpy array The quantiles for", "= lambda x : x * self.df(x, alpha, beta, mu)", "x, c=None, n=None, offset=False): log_x = np.log(x) log_x[np.isnan(log_x)] = 0", ".4, .5]) >>> ExpoWeibull.qf(p, 3, 4, 1.2) array([1.89361341, 2.2261045 ,", "ndtri as z from scipy import integrate from scipy.optimize import", "return alpha, beta, 1. def sf(self, x, alpha, beta, mu):", "calculated alpha : numpy array or scalar scale parameter for", "* (1 - np.exp(-(x/alpha)**beta))**(mu - 1) \\ * np.exp(-(x/alpha)**beta) def", "or numpy array The value(s) of the cumulative hazard rate", ".. math:: F(x) = \\left [ 1 - e^{-\\left (", "* self.df(x, alpha, beta, mu) top = 2 * self.qf(0.999,", "df(self, x, alpha, beta, mu): r\"\"\" Density function for the", "alpha, beta, 1. def sf(self, x, alpha, beta, mu): r\"\"\"", "shape parameter for the ExpoWeibull distribution mu : numpy array", "= np.min(x) - (np.max(x) - np.min(x))/10. return gamma, alpha, beta,", "or scalar shape parameter for the ExpoWeibull distribution Returns -------", "shape parameter for the ExpoWeibull distribution Returns ------- hf :", "= params[0] alpha = np.exp(params[1]/-beta) elif rr == 'x': beta", "(np.isinf(alpha) | np.isnan(alpha)): alpha = np.median(x) if (np.isinf(beta) | np.isnan(beta)):", "array([9.94911330e-01, 8.72902497e-01, 4.23286791e-01, 5.06674866e-02, 5.34717283e-04]) \"\"\" return 1 - np.power(1", "surpyval.parametric.parametric_fitter import ParametricFitter from .fitters.mpp import mpp class ExpoWeibull_(ParametricFitter): def", "import mpp class ExpoWeibull_(ParametricFitter): def __init__(self, name): self.name = name", "5]) >>> ExpoWeibull.df(x, 3, 4, 1.2) array([0.02427515, 0.27589838, 0.53701385, 0.15943643,", "numpy array The value(s) of the cumulative hazard rate at", "return gamma, alpha, beta, 1. else: return alpha, beta, 1.", "e^{-\\left ( \\frac{x}{\\alpha} \\right )^\\beta} \\right ]^{\\mu} Parameters ---------- x", "the ExpoWeibull distribution Returns ------- hf : scalar or numpy", "math:: h(x) = \\frac{f(x)}{R(x)} Parameters ---------- x : numpy array", "(1 - np.exp(-np.exp(y)))**mu def unpack_rr(self, params, rr): #UPDATE ME if", "parameter for the ExpoWeibull distribution Returns ------- Q : scalar", "for the ExpoWeibull Distribution: .. math:: F(x) = \\left [", "scalar The percentiles at which the quantile will be calculated", "4, 5]) >>> ExpoWeibull.Hf(x, 3, 4, 1.2) array([5.10166141e-03, 1.35931416e-01, 8.59705336e-01,", "p Examples -------- >>> import numpy as np >>> from", "| np.isnan(beta)): beta = 1. if offset: gamma = np.min(x)", "n, how='MPP') mu, sigma = gumb.params alpha, beta = np.exp(mu),", "0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.95,", "mu): func = lambda x : x * self.df(x, alpha,", "Instantaneous hazard rate for the ExpoWeibull Distribution: .. 
math:: H(x)", "numpy array The value(s) of the instantaneous hazard rate at", "each value p Examples -------- >>> import numpy as np", "np.log(-np.log((1 - y[~mask]**(1./mu)))) out[mask] = np.nan return out def mpp_inv_y_transform(self,", "self.param_map = { 'alpha' : 0, 'beta' : 1, 'mu'", ": scalar or numpy array The value(s) of the instantaneous", "'alpha' : 0, 'beta' : 1, 'mu' : 2 }", "0.999, 0.9999] self.param_names = ['alpha', 'beta', 'mu'] self.param_map = {", "the ExpoWeibull distribution Returns ------- df : scalar or numpy", "1, 'mu' : 2 } def _parameter_initialiser(self, x, c=None, n=None,", "+ X, alpha, beta, mu) / self.sf(X, alpha, beta, mu)", "para.Gumbel.fit(log_x, c, n, how='MPP') mu, sigma = gumb.params alpha, beta", "cs(self, x, X, alpha, beta, mu): r\"\"\" Conditional survival (or", "nonp from surpyval.parametric.parametric_fitter import ParametricFitter from .fitters.mpp import mpp class", "surpyval import parametric as para from surpyval import nonparametric as", "R(x) \\right ) Parameters ---------- x : numpy array or", "0.15943643, 0.00330058]) \"\"\" return (beta * mu * x**(beta -", "self.k = 3 self.bounds = ((0, None), (0, None), (0,", "\\left ( \\frac{x}{\\alpha} \\right )^\\beta} Parameters ---------- x : numpy", "( \\frac{x}{\\alpha} \\right )^\\beta} \\right ]^{\\mu} Parameters ---------- x :", "beta, mu): func = lambda x : x * self.df(x,", "4, 5]) >>> ExpoWeibull.sf(x, 3, 4, 1.2) array([9.94911330e-01, 8.72902497e-01, 4.23286791e-01,", "4, 1.2) array([0.02439931, 0.3160701 , 1.26867613, 3.14672068, 6.17256436]) \"\"\" return", "= ((y == 0) | (y == 1)) out =", "distribution at each value p Examples -------- >>> import numpy", "U = uniform.rvs(size=size) return self.qf(U, alpha, beta, mu) def mpp_x_transform(self,", "7.53377239e+00]) \"\"\" return -np.log(self.sf(x, alpha, beta, mu)) def qf(self, p,", "from surpyval import parametric as para from surpyval import nonparametric", "the ExpoWeibull Distribution: .. math:: R(x) = 1 - \\left", "1. / sigma if (np.isinf(alpha) | np.isnan(alpha)): alpha = np.median(x)", "2, 3, 4, 5]) >>> ExpoWeibull.hf(x, 3, 4, 1.2) array([0.02439931,", "1.35732908e-07]) \"\"\" return self.sf(x + X, alpha, beta, mu) /", "c, n, how='MPP') mu, sigma = gumb.params alpha, beta =", "alpha : numpy array or scalar scale parameter for the", "beta = 1./params[0] alpha = np.exp(params[1] / (beta * params[0]))", "Examples -------- >>> import numpy as np >>> from surpyval", "n=None, offset=False): log_x = np.log(x) log_x[np.isnan(log_x)] = 0 gumb =", "hazard rate at x. Examples -------- >>> import numpy as", "{ 'alpha' : 0, 'beta' : 1, 'mu' : 2", "mu): U = uniform.rvs(size=size) return self.qf(U, alpha, beta, mu) def", "<gh_stars>0 import autograd.numpy as np from scipy.stats import uniform from", "return self.sf(x + X, alpha, beta, mu) / self.sf(X, alpha,", "(beta * mu * x**(beta - 1)) / (alpha**beta) \\", "beta = 1. if offset: gamma = np.min(x) - (np.max(x)", "for the ExpoWeibull Distribution: .. math:: q(p) = Parameters ----------", "numpy array The value(s) of the failure function at x.", "mu) def ff(self, x, alpha, beta, mu): r\"\"\" Failure (CDF", "0) | (y == 1)) out = np.zeros_like(y) out[~mask] =", "x : numpy array or scalar The values at which", ": numpy array or scalar The percentiles at which the", "mean(self, alpha, beta, mu): func = lambda x : x", "math:: F(x) = \\left [ 1 - e^{-\\left ( \\frac{x}{\\alpha}", "hf : scalar or numpy array The value(s) of the", "np.median(x) if (np.isinf(beta) | np.isnan(beta)): beta = 1. 
if offset:", "( R(x) \\right ) Parameters ---------- x : numpy array", "\\left ( R(x) \\right ) Parameters ---------- x : numpy", "how='MPP') mu, sigma = gumb.params alpha, beta = np.exp(mu), 1.", ", 2.46627621, 2.66992747, 2.85807988]) \"\"\" return alpha * (-np.log(1 -", "beta, mu): r\"\"\" Instantaneous hazard rate for the ExpoWeibull Distribution:", "alpha, beta, mu)) def qf(self, p, alpha, beta, mu): r\"\"\"", "or numpy array The value(s) of the instantaneous hazard rate", "import euler_gamma from scipy.special import gamma as gamma_func from scipy.special", "(CDF or unreliability) function for the ExpoWeibull Distribution: .. math::", "2, 3, 4, 5]) >>> ExpoWeibull.sf(x, 1, 3, 4, 1.2)", "rate at x. Examples -------- >>> import numpy as np", "0 gumb = para.Gumbel.fit(log_x, c, n, how='MLE') if not gumb.res.success:", "mu): r\"\"\" Survival (or reliability) function for the ExpoWeibull Distribution:", "beta, mu): r\"\"\" Failure (CDF or unreliability) function for the", "ff(self, x, alpha, beta, mu): r\"\"\" Failure (CDF or unreliability)", "2.2261045 , 2.46627621, 2.66992747, 2.85807988]) \"\"\" return alpha * (-np.log(1", "8.72902497e-01, 4.23286791e-01, 5.06674866e-02, 5.34717283e-04]) \"\"\" return 1 - np.power(1 -", "= (0, np.inf) self.plot_x_scale = 'log' self.y_ticks = [0.0001, 0.0002,", "= np.exp(params[1] / (beta * params[0])) return alpha, beta, 1.", ">>> x = np.array([1, 2, 3, 4, 5]) >>> ExpoWeibull.ff(x,", "math:: R(x) = 1 - \\left [ 1 - e^{-\\left", "Returns ------- hf : scalar or numpy array The value(s)", "= np.log(-np.log((1 - y[~mask]**(1./mu)))) out[mask] = np.nan return out def", "1.2) array([0.02439931, 0.3160701 , 1.26867613, 3.14672068, 6.17256436]) \"\"\" return self.df(x,", "- y[~mask]**(1./mu)))) out[mask] = np.nan return out def mpp_inv_y_transform(self, y,", "mpp class ExpoWeibull_(ParametricFitter): def __init__(self, name): self.name = name self.k", ".. math:: R(x, X) = \\frac{R(x + X)}{R(X)} Parameters ----------", "[ 1 - e^{-\\left ( \\frac{x}{\\alpha} \\right )^\\beta} \\right ]^{\\mu", "ExpoWeibull Distribution: .. math:: H(x) = -\\ln \\left ( R(x)", "ExpoWeibull.qf(p, 3, 4, 1.2) array([1.89361341, 2.2261045 , 2.46627621, 2.66992747, 2.85807988])", "ParametricFitter from .fitters.mpp import mpp class ExpoWeibull_(ParametricFitter): def __init__(self, name):", "array or scalar The values at which the function will", "for the ExpoWeibull Distribution: .. math:: R(x, X) = \\frac{R(x", "y[~mask]**(1./mu)))) out[mask] = np.nan return out def mpp_inv_y_transform(self, y, *params):", "import gamma as gamma_func from scipy.special import ndtri as z", "reliability) function for the ExpoWeibull Distribution: .. math:: R(x) =", "nonparametric as nonp from surpyval.parametric.parametric_fitter import ParametricFitter from .fitters.mpp import", "integrate from scipy.optimize import minimize from surpyval import parametric as", ": numpy array or scalar scale parameter for the ExpoWeibull", "ExpoWeibull Distribution: .. math:: F(x) = \\left [ 1 -", "parameter for the ExpoWeibull distribution Returns ------- df : scalar", "4, 1.2) array([1.89361341, 2.2261045 , 2.46627621, 2.66992747, 2.85807988]) \"\"\" return", "= ((0, None), (0, None), (0, None),) self.support = (0,", "log_x = np.log(x) log_x[np.isnan(log_x)] = 0 gumb = para.Gumbel.fit(log_x, c,", "4.25451775e-01, 5.09266354e-02, 5.37452200e-04, 1.35732908e-07]) \"\"\" return self.sf(x + X, alpha,", "y, *params): i = len(params) mu = params[i-1] return (1", "rate for the ExpoWeibull Distribution: .. 
math:: q(p) = Parameters", "ExpoWeibull_(ParametricFitter): def __init__(self, name): self.name = name self.k = 3", "np.exp(-(x / alpha)**beta), mu) def ff(self, x, alpha, beta, mu):", "beta, mu): U = uniform.rvs(size=size) return self.qf(U, alpha, beta, mu)", "qf(self, p, alpha, beta, mu): r\"\"\" Instantaneous hazard rate for", "at each value p Examples -------- >>> import numpy as", "ExpoWeibull distribution Returns ------- hf : scalar or numpy array", "2 * self.qf(0.999, alpha, beta, mu) return integrate.quadrature(func, 0, top)[0]", "['alpha', 'beta', 'mu'] self.param_map = { 'alpha' : 0, 'beta'", "np.min(x) - (np.max(x) - np.min(x))/10. return gamma, alpha, beta, 1.", "\\frac{x}{\\alpha} \\right )^{\\beta - 1} \\left [ 1 - e^{-\\left", "ExpoWeibull.ff(x, 3, 4, 1.2) array([0.00508867, 0.1270975 , 0.57671321, 0.94933251, 0.99946528])", "R(x, X) = \\frac{R(x + X)}{R(X)} Parameters ---------- x :", "np.zeros_like(y) out[~mask] = np.log(-np.log((1 - y[~mask]**(1./mu)))) out[mask] = np.nan return", "(np.isinf(beta) | np.isnan(beta)): beta = 1. if offset: gamma =", "else: return alpha, beta, 1. def sf(self, x, alpha, beta,", "numpy as np >>> from surpyval import ExpoWeibull >>> p", "\\left ( \\frac{\\beta}{\\alpha} \\right ) \\left ( \\frac{x}{\\alpha} \\right )^{\\beta", "para.Gumbel.fit(log_x, c, n, how='MLE') if not gumb.res.success: gumb = para.Gumbel.fit(log_x,", "0.9999] self.param_names = ['alpha', 'beta', 'mu'] self.param_map = { 'alpha'", "numpy array or scalar The values at which the function", "import jacobian from numpy import euler_gamma from scipy.special import gamma", ")^\\beta} \\right ]^{\\mu - 1} e^{- \\left ( \\frac{x}{\\alpha} \\right", "beta, mu) def Hf(self, x, alpha, beta, mu): r\"\"\" Instantaneous", ">>> ExpoWeibull.hf(x, 3, 4, 1.2) array([0.02439931, 0.3160701 , 1.26867613, 3.14672068,", "Distribution: .. math:: q(p) = Parameters ---------- p : numpy", "value p Examples -------- >>> import numpy as np >>>", "return out def mpp_inv_y_transform(self, y, *params): i = len(params) mu", "ExpoWeibull distribution Returns ------- Hf : scalar or numpy array", "------- Hf : scalar or numpy array The value(s) of", "(np.max(x) - np.min(x))/10. return gamma, alpha, beta, 1. else: return", "= np.array([1, 2, 3, 4, 5]) >>> ExpoWeibull.ff(x, 3, 4,", "func = lambda x : x * self.df(x, alpha, beta,", "from scipy.optimize import minimize from surpyval import parametric as para", "out[mask] = np.nan return out def mpp_inv_y_transform(self, y, *params): i", "scipy.special import ndtri as z from scipy import integrate from", ": x * self.df(x, alpha, beta, mu) top = 2", "beta, mu) / self.sf(X, alpha, beta, mu) def df(self, x,", "numpy array The quantiles for the Weibull distribution at each", "import ParametricFitter from .fitters.mpp import mpp class ExpoWeibull_(ParametricFitter): def __init__(self,", "how='MLE') if not gumb.res.success: gumb = para.Gumbel.fit(log_x, c, n, how='MPP')", "as para from surpyval import nonparametric as nonp from surpyval.parametric.parametric_fitter", "\\left [ 1 - e^{-\\left ( \\frac{x}{\\alpha} \\right )^\\beta} \\right", "0.005, 0.01, 0.02, 0.03, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5,", "name): self.name = name self.k = 3 self.bounds = ((0,", "Distribution: .. math:: R(x) = 1 - \\left [ 1", "elif rr == 'x': beta = 1./params[0] alpha = np.exp(params[1]", "5.06674866e-02, 5.34717283e-04]) \"\"\" return 1 - np.power(1 - np.exp(-(x /", "reliability function at x. 
Examples -------- >>> import numpy as", "Returns ------- Hf : scalar or numpy array The value(s)", "( \\frac{x}{\\alpha} \\right )^\\beta} Parameters ---------- x : numpy array", "0.53701385, 0.15943643, 0.00330058]) \"\"\" return (beta * mu * x**(beta", "value(s) of the failure function at x. Examples -------- >>>", "1.2) array([5.10166141e-03, 1.35931416e-01, 8.59705336e-01, 2.98247086e+00, 7.53377239e+00]) \"\"\" return -np.log(self.sf(x, alpha,", "/ (alpha**beta) \\ * (1 - np.exp(-(x/alpha)**beta))**(mu - 1) \\", "/ alpha)**beta), mu) def ff(self, x, alpha, beta, mu): r\"\"\"", "1.35931416e-01, 8.59705336e-01, 2.98247086e+00, 7.53377239e+00]) \"\"\" return -np.log(self.sf(x, alpha, beta, mu))", "((y == 0) | (y == 1)) out = np.zeros_like(y)", "p : numpy array or scalar The percentiles at which", "* x**(beta - 1)) / (alpha**beta) \\ * (1 -", "= len(params) mu = params[i-1] return (1 - np.exp(-np.exp(y)))**mu def", ">>> x = np.array([1, 2, 3, 4, 5]) >>> ExpoWeibull.Hf(x,", "3, 4, 5]) >>> ExpoWeibull.ff(x, 3, 4, 1.2) array([0.00508867, 0.1270975", "p = np.array([.1, .2, .3, .4, .5]) >>> ExpoWeibull.qf(p, 3,", "be calculated alpha : numpy array or scalar scale parameter", "Returns ------- df : scalar or numpy array The value(s)", "beta, mu) def mpp_x_transform(self, x, gamma=0): return np.log(x - gamma)", "mu): r\"\"\" Failure (CDF or unreliability) function for the ExpoWeibull", "0.0002, 0.0003, 0.001, 0.002, 0.003, 0.005, 0.01, 0.02, 0.03, 0.05,", "array The value(s) of the failure function at x. Examples", "r\"\"\" Conditional survival (or reliability) function for the ExpoWeibull Distribution:", "= ['alpha', 'beta', 'mu'] self.param_map = { 'alpha' : 0,", "alpha, beta, mu) def mpp_x_transform(self, x, gamma=0): return np.log(x -", "= np.array([1, 2, 3, 4, 5]) >>> ExpoWeibull.Hf(x, 3, 4,", "or numpy array The value(s) of the failure function at", "x, X, alpha, beta, mu): r\"\"\" Conditional survival (or reliability)", "\\frac{x}{\\alpha} \\right )^\\beta} Parameters ---------- x : numpy array or", "class ExpoWeibull_(ParametricFitter): def __init__(self, name): self.name = name self.k =", "0.57671321, 0.94933251, 0.99946528]) \"\"\" return np.power(1 - np.exp(-(x / alpha)**beta),", "for the ExpoWeibull distribution Returns ------- hf : scalar or", "beta, mu)) def qf(self, p, alpha, beta, mu): r\"\"\" Instantaneous", "#UPDATE ME if rr == 'y': beta = params[0] alpha", "np.exp(-np.exp(y)))**mu def unpack_rr(self, params, rr): #UPDATE ME if rr ==", ": numpy array or scalar shape parameter for the ExpoWeibull", "( \\frac{\\beta}{\\alpha} \\right ) \\left ( \\frac{x}{\\alpha} \\right )^{\\beta -", "0.002, 0.003, 0.005, 0.01, 0.02, 0.03, 0.05, 0.1, 0.2, 0.3,", "1.2) array([8.77367129e-01, 4.25451775e-01, 5.09266354e-02, 5.37452200e-04, 1.35732908e-07]) \"\"\" return self.sf(x +", "/ alpha)**beta), mu) def cs(self, x, X, alpha, beta, mu):", "math:: H(x) = -\\ln \\left ( R(x) \\right ) Parameters", ">>> import numpy as np >>> from surpyval import ExpoWeibull", "surpyval import ExpoWeibull >>> x = np.array([1, 2, 3, 4,", "unreliability) function for the ExpoWeibull Distribution: .. math:: F(x) =", "of the density function at x. Examples -------- >>> import", "\"\"\" return self.sf(x + X, alpha, beta, mu) / self.sf(X,", "function at x. 
Examples -------- >>> import numpy as np", "5]) >>> ExpoWeibull.sf(x, 3, 4, 1.2) array([9.94911330e-01, 8.72902497e-01, 4.23286791e-01, 5.06674866e-02,", "out[~mask] = np.log(-np.log((1 - y[~mask]**(1./mu)))) out[mask] = np.nan return out", "2.85807988]) \"\"\" return alpha * (-np.log(1 - p**(1./mu)))**(1/beta) def mean(self,", "as np from scipy.stats import uniform from autograd import jacobian", "[0.0001, 0.0002, 0.0003, 0.001, 0.002, 0.003, 0.005, 0.01, 0.02, 0.03,", "The value(s) of the reliability function at x. Examples --------", "def mean(self, alpha, beta, mu): func = lambda x :", "2 } def _parameter_initialiser(self, x, c=None, n=None, offset=False): log_x =", "offset=False): log_x = np.log(x) log_x[np.isnan(log_x)] = 0 gumb = para.Gumbel.fit(log_x,", "beta, mu): r\"\"\" Survival (or reliability) function for the ExpoWeibull", "1 - np.power(1 - np.exp(-(x / alpha)**beta), mu) def ff(self,", "for the ExpoWeibull Distribution: .. math:: f(x) = \\mu \\left", "alpha, beta, mu): r\"\"\" Survival (or reliability) function for the", "/ (beta * params[0])) return alpha, beta, 1. ExpoWeibull =", "return alpha * (-np.log(1 - p**(1./mu)))**(1/beta) def mean(self, alpha, beta,", "4, 1.2) array([0.00508867, 0.1270975 , 0.57671321, 0.94933251, 0.99946528]) \"\"\" return", "- np.power(1 - np.exp(-(x / alpha)**beta), mu) def ff(self, x,", "0.95, 0.99, 0.999, 0.9999] self.param_names = ['alpha', 'beta', 'mu'] self.param_map", "np.array([1, 2, 3, 4, 5]) >>> ExpoWeibull.hf(x, 3, 4, 1.2)", "surpyval import ExpoWeibull >>> p = np.array([.1, .2, .3, .4,", "return self.qf(U, alpha, beta, mu) def mpp_x_transform(self, x, gamma=0): return", "mu) / self.sf(X, alpha, beta, mu) def df(self, x, alpha,", "of the reliability function at x. Examples -------- >>> import", "0.99, 0.999, 0.9999] self.param_names = ['alpha', 'beta', 'mu'] self.param_map =", "ExpoWeibull Distribution: .. math:: R(x, X) = \\frac{R(x + X)}{R(X)}", "Parameters ---------- x : numpy array or scalar The values", "ExpoWeibull distribution Returns ------- df : scalar or numpy array", "= np.array([1, 2, 3, 4, 5]) >>> ExpoWeibull.hf(x, 3, 4,", "------- sf : scalar or numpy array The value(s) of", "def mpp_y_transform(self, y, *params): mu = params[-1] mask = ((y", "\"\"\" return np.power(1 - np.exp(-(x / alpha)**beta), mu) def cs(self,", "beta, 1. else: return alpha, beta, 1. def sf(self, x,", "f(x) = \\mu \\left ( \\frac{\\beta}{\\alpha} \\right ) \\left (", "scalar shape parameter for the ExpoWeibull distribution Returns ------- sf", ".. math:: f(x) = \\mu \\left ( \\frac{\\beta}{\\alpha} \\right )", "alpha * (-np.log(1 - p**(1./mu)))**(1/beta) def mean(self, alpha, beta, mu):", "*params): i = len(params) mu = params[i-1] return (1 -", "\\mu \\left ( \\frac{\\beta}{\\alpha} \\right ) \\left ( \\frac{x}{\\alpha} \\right", "for the ExpoWeibull Distribution: .. math:: H(x) = -\\ln \\left", ">>> from surpyval import ExpoWeibull >>> p = np.array([.1, .2,", "---------- p : numpy array or scalar The percentiles at", "| np.isnan(alpha)): alpha = np.median(x) if (np.isinf(beta) | np.isnan(beta)): beta", "alpha, beta, mu) def df(self, x, alpha, beta, mu): r\"\"\"", "gamma) def mpp_y_transform(self, y, *params): mu = params[-1] mask =", "R(x) = 1 - \\left [ 1 - e^{-\\left (", "(0, None),) self.support = (0, np.inf) self.plot_x_scale = 'log' self.y_ticks", "3, 4, 5]) >>> ExpoWeibull.df(x, 3, 4, 1.2) array([0.02427515, 0.27589838,", "hazard rate for the ExpoWeibull Distribution: .. 
math:: H(x) =", "import nonparametric as nonp from surpyval.parametric.parametric_fitter import ParametricFitter from .fitters.mpp", "survival (or reliability) function for the ExpoWeibull Distribution: .. math::", "x, alpha, beta, mu): r\"\"\" Instantaneous hazard rate for the", "import ExpoWeibull >>> x = np.array([1, 2, 3, 4, 5])", "X, alpha, beta, mu): r\"\"\" Conditional survival (or reliability) function", "Survival (or reliability) function for the ExpoWeibull Distribution: .. math::", "Density function for the ExpoWeibull Distribution: .. math:: f(x) =", "mu)) def qf(self, p, alpha, beta, mu): r\"\"\" Instantaneous hazard", "scipy.stats import uniform from autograd import jacobian from numpy import", ": 2 } def _parameter_initialiser(self, x, c=None, n=None, offset=False): log_x", "scalar shape parameter for the ExpoWeibull distribution mu : numpy", "which the quantile will be calculated alpha : numpy array", "x, gamma=0): return np.log(x - gamma) def mpp_y_transform(self, y, *params):", "parameter for the ExpoWeibull distribution Returns ------- sf : scalar", "- 1)) / (alpha**beta) \\ * (1 - np.exp(-(x/alpha)**beta))**(mu -", ") \\left ( \\frac{x}{\\alpha} \\right )^{\\beta - 1} \\left [", ".. math:: q(p) = Parameters ---------- p : numpy array", "2, 3, 4, 5]) >>> ExpoWeibull.ff(x, 3, 4, 1.2) array([0.00508867,", "scalar or numpy array The value(s) of the instantaneous hazard", "p**(1./mu)))**(1/beta) def mean(self, alpha, beta, mu): func = lambda x", "distribution Returns ------- sf : scalar or numpy array The", "-np.log(self.sf(x, alpha, beta, mu)) def qf(self, p, alpha, beta, mu):", "for the ExpoWeibull distribution Returns ------- Hf : scalar or", "as z from scipy import integrate from scipy.optimize import minimize", "2.98247086e+00, 7.53377239e+00]) \"\"\" return -np.log(self.sf(x, alpha, beta, mu)) def qf(self,", "mu) def cs(self, x, X, alpha, beta, mu): r\"\"\" Conditional", "hazard rate for the ExpoWeibull Distribution: .. math:: h(x) =", "not gumb.res.success: gumb = para.Gumbel.fit(log_x, c, n, how='MPP') mu, sigma", "ExpoWeibull distribution Returns ------- sf : scalar or numpy array", "numpy as np >>> from surpyval import ExpoWeibull >>> x", "alpha)**beta), mu) def ff(self, x, alpha, beta, mu): r\"\"\" Failure", "mu): r\"\"\" Conditional survival (or reliability) function for the ExpoWeibull", "def qf(self, p, alpha, beta, mu): r\"\"\" Instantaneous hazard rate", "i = len(params) mu = params[i-1] return (1 - np.exp(-np.exp(y)))**mu", "alpha, beta, 1. else: return alpha, beta, 1. def sf(self,", "None),) self.support = (0, np.inf) self.plot_x_scale = 'log' self.y_ticks =", "or scalar The values at which the function will be", "c=None, n=None, offset=False): log_x = np.log(x) log_x[np.isnan(log_x)] = 0 gumb", "from surpyval import nonparametric as nonp from surpyval.parametric.parametric_fitter import ParametricFitter", "from .fitters.mpp import mpp class ExpoWeibull_(ParametricFitter): def __init__(self, name): self.name", "scalar or numpy array The value(s) of the cumulative hazard", "= params[i-1] return (1 - np.exp(-np.exp(y)))**mu def unpack_rr(self, params, rr):", "np.exp(-(x/alpha)**beta))**(mu - 1) \\ * np.exp(-(x/alpha)**beta) def hf(self, x, alpha,", "uniform from autograd import jacobian from numpy import euler_gamma from", "(beta * params[0])) return alpha, beta, 1. 
ExpoWeibull = ExpoWeibull_('ExpoWeibull')", "1.2) array([1.89361341, 2.2261045 , 2.46627621, 2.66992747, 2.85807988]) \"\"\" return alpha", "* self.qf(0.999, alpha, beta, mu) return integrate.quadrature(func, 0, top)[0] def", "/ self.sf(x, alpha, beta, mu) def Hf(self, x, alpha, beta,", "distribution mu : numpy array or scalar shape parameter for", "\\ * np.exp(-(x/alpha)**beta) def hf(self, x, alpha, beta, mu): r\"\"\"", "def hf(self, x, alpha, beta, mu): r\"\"\" Instantaneous hazard rate", "0.9, 0.95, 0.99, 0.999, 0.9999] self.param_names = ['alpha', 'beta', 'mu']", "= \\left [ 1 - e^{-\\left ( \\frac{x}{\\alpha} \\right )^\\beta}", "or numpy array The value(s) of the reliability function at", "'x': beta = 1./params[0] alpha = np.exp(params[1] / (beta *", "= np.exp(params[1]/-beta) elif rr == 'x': beta = 1./params[0] alpha", "\\left ( \\frac{x}{\\alpha} \\right )^{\\beta - 1} \\left [ 1", "from surpyval import ExpoWeibull >>> p = np.array([.1, .2, .3,", "x, alpha, beta, mu): r\"\"\" Failure (CDF or unreliability) function", "import numpy as np >>> from surpyval import ExpoWeibull >>>", "np.inf) self.plot_x_scale = 'log' self.y_ticks = [0.0001, 0.0002, 0.0003, 0.001,", "\\right )^\\beta} \\right ]^{\\mu - 1} e^{- \\left ( \\frac{x}{\\alpha}", ", 1.26867613, 3.14672068, 6.17256436]) \"\"\" return self.df(x, alpha, beta, mu)", "+ X)}{R(X)} Parameters ---------- x : numpy array or scalar", "the ExpoWeibull Distribution: .. math:: R(x, X) = \\frac{R(x +", "array or scalar shape parameter for the ExpoWeibull distribution Returns", "-------- >>> import numpy as np >>> from surpyval import", "autograd.numpy as np from scipy.stats import uniform from autograd import", "'mu' : 2 } def _parameter_initialiser(self, x, c=None, n=None, offset=False):", "8.59705336e-01, 2.98247086e+00, 7.53377239e+00]) \"\"\" return -np.log(self.sf(x, alpha, beta, mu)) def", "scalar shape parameter for the ExpoWeibull distribution Returns ------- hf", "alpha, beta, mu) return integrate.quadrature(func, 0, top)[0] def random(self, size,", "= { 'alpha' : 0, 'beta' : 1, 'mu' :", "} def _parameter_initialiser(self, x, c=None, n=None, offset=False): log_x = np.log(x)", "or unreliability) function for the ExpoWeibull Distribution: .. math:: F(x)", "mu, sigma = gumb.params alpha, beta = np.exp(mu), 1. /", "y, *params): mu = params[-1] mask = ((y == 0)", "/ self.sf(X, alpha, beta, mu) def df(self, x, alpha, beta,", "0.01, 0.02, 0.03, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6,", "4, 1.2) array([8.77367129e-01, 4.25451775e-01, 5.09266354e-02, 5.37452200e-04, 1.35732908e-07]) \"\"\" return self.sf(x", "3.14672068, 6.17256436]) \"\"\" return self.df(x, alpha, beta, mu) / self.sf(x,", ": scalar or numpy array The value(s) of the cumulative", "5]) >>> ExpoWeibull.ff(x, 3, 4, 1.2) array([0.00508867, 0.1270975 , 0.57671321,", "= name self.k = 3 self.bounds = ((0, None), (0,", "((0, None), (0, None), (0, None),) self.support = (0, np.inf)", "The value(s) of the cumulative hazard rate at x. Examples", "beta, mu) def df(self, x, alpha, beta, mu): r\"\"\" Density", "instantaneous hazard rate at x. Examples -------- >>> import numpy", "self.name = name self.k = 3 self.bounds = ((0, None),", "0.94933251, 0.99946528]) \"\"\" return np.power(1 - np.exp(-(x / alpha)**beta), mu)", "def mpp_x_transform(self, x, gamma=0): return np.log(x - gamma) def mpp_y_transform(self,", ": 1, 'mu' : 2 } def _parameter_initialiser(self, x, c=None,", "the ExpoWeibull Distribution: .. 
math:: q(p) = Parameters ---------- p", "'log' self.y_ticks = [0.0001, 0.0002, 0.0003, 0.001, 0.002, 0.003, 0.005,", "scipy.special import gamma as gamma_func from scipy.special import ndtri as", "\"\"\" return 1 - np.power(1 - np.exp(-(x / alpha)**beta), mu)", "numpy array or scalar shape parameter for the ExpoWeibull distribution", "Distribution: .. math:: R(x, X) = \\frac{R(x + X)}{R(X)} Parameters", "for the ExpoWeibull distribution Returns ------- sf : scalar or", "hf(self, x, alpha, beta, mu): r\"\"\" Instantaneous hazard rate for", "the ExpoWeibull distribution mu : numpy array or scalar shape", "__init__(self, name): self.name = name self.k = 3 self.bounds =", "the density function at x. Examples -------- >>> import numpy", "ExpoWeibull Distribution: .. math:: h(x) = \\frac{f(x)}{R(x)} Parameters ---------- x", "3, 4, 1.2) array([0.02439931, 0.3160701 , 1.26867613, 3.14672068, 6.17256436]) \"\"\"", "q(p) = Parameters ---------- p : numpy array or scalar", ": scalar or numpy array The quantiles for the Weibull", "scalar or numpy array The quantiles for the Weibull distribution", "np.log(x - gamma) def mpp_y_transform(self, y, *params): mu = params[-1]", "np >>> from surpyval import ExpoWeibull >>> x = np.array([1,", "= 2 * self.qf(0.999, alpha, beta, mu) return integrate.quadrature(func, 0,", "unpack_rr(self, params, rr): #UPDATE ME if rr == 'y': beta", "\\right )^\\beta} \\right ]^{\\mu} Parameters ---------- x : numpy array", "0.7, 0.8, 0.9, 0.95, 0.99, 0.999, 0.9999] self.param_names = ['alpha',", "3, 4, 1.2) array([9.94911330e-01, 8.72902497e-01, 4.23286791e-01, 5.06674866e-02, 5.34717283e-04]) \"\"\" return", "0.00330058]) \"\"\" return (beta * mu * x**(beta - 1))", "gamma = np.min(x) - (np.max(x) - np.min(x))/10. return gamma, alpha,", "df : scalar or numpy array The value(s) of the", "rr): #UPDATE ME if rr == 'y': beta = params[0]", "scalar scale parameter for the ExpoWeibull distribution beta : numpy", "x = np.array([1, 2, 3, 4, 5]) >>> ExpoWeibull.sf(x, 1,", "The quantiles for the Weibull distribution at each value p", "beta, mu) return integrate.quadrature(func, 0, top)[0] def random(self, size, alpha,", "which the function will be calculated alpha : numpy array", "None), (0, None),) self.support = (0, np.inf) self.plot_x_scale = 'log'", "1. def sf(self, x, alpha, beta, mu): r\"\"\" Survival (or", ".5]) >>> ExpoWeibull.qf(p, 3, 4, 1.2) array([1.89361341, 2.2261045 , 2.46627621,", "| (y == 1)) out = np.zeros_like(y) out[~mask] = np.log(-np.log((1", "scalar shape parameter for the ExpoWeibull distribution Returns ------- df", "np.isnan(beta)): beta = 1. if offset: gamma = np.min(x) -", "(alpha**beta) \\ * (1 - np.exp(-(x/alpha)**beta))**(mu - 1) \\ *", "of the instantaneous hazard rate at x. Examples -------- >>>", "0.5, 0.6, 0.7, 0.8, 0.9, 0.95, 0.99, 0.999, 0.9999] self.param_names", "\\ * (1 - np.exp(-(x/alpha)**beta))**(mu - 1) \\ * np.exp(-(x/alpha)**beta)", "\\frac{f(x)}{R(x)} Parameters ---------- x : numpy array or scalar The", "= 0 gumb = para.Gumbel.fit(log_x, c, n, how='MLE') if not", "(or reliability) function for the ExpoWeibull Distribution: .. math:: R(x)", "2.46627621, 2.66992747, 2.85807988]) \"\"\" return alpha * (-np.log(1 - p**(1./mu)))**(1/beta)", ")^\\beta} Parameters ---------- x : numpy array or scalar The", "Distribution: .. math:: H(x) = -\\ln \\left ( R(x) \\right", "if (np.isinf(beta) | np.isnan(beta)): beta = 1. 
if offset: gamma", ">>> x = np.array([1, 2, 3, 4, 5]) >>> ExpoWeibull.hf(x,", "np.array([1, 2, 3, 4, 5]) >>> ExpoWeibull.Hf(x, 3, 4, 1.2)", "for the ExpoWeibull Distribution: .. math:: h(x) = \\frac{f(x)}{R(x)} Parameters", "3, 4, 1.2) array([1.89361341, 2.2261045 , 2.46627621, 2.66992747, 2.85807988]) \"\"\"", "function for the ExpoWeibull Distribution: .. math:: f(x) = \\mu", "The percentiles at which the quantile will be calculated alpha", "Conditional survival (or reliability) function for the ExpoWeibull Distribution: ..", "numpy array or scalar scale parameter for the ExpoWeibull distribution", "gamma_func from scipy.special import ndtri as z from scipy import", "(or reliability) function for the ExpoWeibull Distribution: .. math:: R(x,", "mu) top = 2 * self.qf(0.999, alpha, beta, mu) return", "Returns ------- sf : scalar or numpy array The value(s)", "np.exp(params[1]/-beta) elif rr == 'x': beta = 1./params[0] alpha =", "= 1./params[0] alpha = np.exp(params[1] / (beta * params[0])) return", "1 - e^{-\\left ( \\frac{x}{\\alpha} \\right )^\\beta} \\right ]^{\\mu -", "mu) def mpp_x_transform(self, x, gamma=0): return np.log(x - gamma) def", "x. Examples -------- >>> import numpy as np >>> from", "5]) >>> ExpoWeibull.sf(x, 1, 3, 4, 1.2) array([8.77367129e-01, 4.25451775e-01, 5.09266354e-02,", "out def mpp_inv_y_transform(self, y, *params): i = len(params) mu =", "F(x) = \\left [ 1 - e^{-\\left ( \\frac{x}{\\alpha} \\right", "== 'x': beta = 1./params[0] alpha = np.exp(params[1] / (beta", "np.exp(-(x / alpha)**beta), mu) def cs(self, x, X, alpha, beta,", ">>> p = np.array([.1, .2, .3, .4, .5]) >>> ExpoWeibull.qf(p,", "- np.exp(-(x / alpha)**beta), mu) def cs(self, x, X, alpha,", "\\right ) Parameters ---------- x : numpy array or scalar", "values at which the function will be calculated alpha :", "scalar or numpy array The value(s) of the failure function", "4, 1.2) array([0.02427515, 0.27589838, 0.53701385, 0.15943643, 0.00330058]) \"\"\" return (beta", "alpha, beta, mu) / self.sf(x, alpha, beta, mu) def Hf(self,", "0.27589838, 0.53701385, 0.15943643, 0.00330058]) \"\"\" return (beta * mu *", "0.3160701 , 1.26867613, 3.14672068, 6.17256436]) \"\"\" return self.df(x, alpha, beta,", "3, 4, 5]) >>> ExpoWeibull.hf(x, 3, 4, 1.2) array([0.02439931, 0.3160701", "value(s) of the density function at x. Examples -------- >>>", ", 0.57671321, 0.94933251, 0.99946528]) \"\"\" return np.power(1 - np.exp(-(x /", "import autograd.numpy as np from scipy.stats import uniform from autograd", "* mu * x**(beta - 1)) / (alpha**beta) \\ *", "return 1 - np.power(1 - np.exp(-(x / alpha)**beta), mu) def", "= 'log' self.y_ticks = [0.0001, 0.0002, 0.0003, 0.001, 0.002, 0.003,", "beta = np.exp(mu), 1. / sigma if (np.isinf(alpha) | np.isnan(alpha)):", "sf(self, x, alpha, beta, mu): r\"\"\" Survival (or reliability) function", "parameter for the ExpoWeibull distribution beta : numpy array or", "- np.exp(-(x / alpha)**beta), mu) def ff(self, x, alpha, beta,", "np.array([.1, .2, .3, .4, .5]) >>> ExpoWeibull.qf(p, 3, 4, 1.2)", "0.1270975 , 0.57671321, 0.94933251, 0.99946528]) \"\"\" return np.power(1 - np.exp(-(x", "ExpoWeibull Distribution: .. math:: q(p) = Parameters ---------- p :", "5.34717283e-04]) \"\"\" return 1 - np.power(1 - np.exp(-(x / alpha)**beta),", "self.param_names = ['alpha', 'beta', 'mu'] self.param_map = { 'alpha' :", "ExpoWeibull.df(x, 3, 4, 1.2) array([0.02427515, 0.27589838, 0.53701385, 0.15943643, 0.00330058]) \"\"\"", "gamma, alpha, beta, 1. else: return alpha, beta, 1. 
def", "rate for the ExpoWeibull Distribution: .. math:: H(x) = -\\ln", "4, 1.2) array([9.94911330e-01, 8.72902497e-01, 4.23286791e-01, 5.06674866e-02, 5.34717283e-04]) \"\"\" return 1", "(1 - np.exp(-(x/alpha)**beta))**(mu - 1) \\ * np.exp(-(x/alpha)**beta) def hf(self,", "= 1 - \\left [ 1 - e^{-\\left ( \\frac{x}{\\alpha}", "import minimize from surpyval import parametric as para from surpyval", "\\right )^\\beta} Parameters ---------- x : numpy array or scalar", "for the ExpoWeibull distribution mu : numpy array or scalar", "1.2) array([0.02427515, 0.27589838, 0.53701385, 0.15943643, 0.00330058]) \"\"\" return (beta *", "= np.exp(mu), 1. / sigma if (np.isinf(alpha) | np.isnan(alpha)): alpha", "return (1 - np.exp(-np.exp(y)))**mu def unpack_rr(self, params, rr): #UPDATE ME", "= 3 self.bounds = ((0, None), (0, None), (0, None),)", "2, 3, 4, 5]) >>> ExpoWeibull.sf(x, 3, 4, 1.2) array([9.94911330e-01,", "alpha = np.median(x) if (np.isinf(beta) | np.isnan(beta)): beta = 1.", "if offset: gamma = np.min(x) - (np.max(x) - np.min(x))/10. return", "scale parameter for the ExpoWeibull distribution beta : numpy array", "ExpoWeibull.Hf(x, 3, 4, 1.2) array([5.10166141e-03, 1.35931416e-01, 8.59705336e-01, 2.98247086e+00, 7.53377239e+00]) \"\"\"", "0.003, 0.005, 0.01, 0.02, 0.03, 0.05, 0.1, 0.2, 0.3, 0.4,", "0.0003, 0.001, 0.002, 0.003, 0.005, 0.01, 0.02, 0.03, 0.05, 0.1,", "jacobian from numpy import euler_gamma from scipy.special import gamma as", "X, alpha, beta, mu) / self.sf(X, alpha, beta, mu) def", "= \\mu \\left ( \\frac{\\beta}{\\alpha} \\right ) \\left ( \\frac{x}{\\alpha}", "The value(s) of the instantaneous hazard rate at x. Examples", "of the cumulative hazard rate at x. Examples -------- >>>", "1./params[0] alpha = np.exp(params[1] / (beta * params[0])) return alpha,", "para from surpyval import nonparametric as nonp from surpyval.parametric.parametric_fitter import", "= np.zeros_like(y) out[~mask] = np.log(-np.log((1 - y[~mask]**(1./mu)))) out[mask] = np.nan", "np.exp(params[1] / (beta * params[0])) return alpha, beta, 1. ExpoWeibull", "= para.Gumbel.fit(log_x, c, n, how='MLE') if not gumb.res.success: gumb =", "gumb.params alpha, beta = np.exp(mu), 1. / sigma if (np.isinf(alpha)", "4, 5]) >>> ExpoWeibull.sf(x, 1, 3, 4, 1.2) array([8.77367129e-01, 4.25451775e-01,", "the ExpoWeibull Distribution: .. math:: f(x) = \\mu \\left (", "= \\frac{R(x + X)}{R(X)} Parameters ---------- x : numpy array", "ExpoWeibull distribution Returns ------- Q : scalar or numpy array", "rr == 'y': beta = params[0] alpha = np.exp(params[1]/-beta) elif", "-\\ln \\left ( R(x) \\right ) Parameters ---------- x :", "x = np.array([1, 2, 3, 4, 5]) >>> ExpoWeibull.ff(x, 3,", "gamma as gamma_func from scipy.special import ndtri as z from", "\\frac{\\beta}{\\alpha} \\right ) \\left ( \\frac{x}{\\alpha} \\right )^{\\beta - 1}", "1} e^{- \\left ( \\frac{x}{\\alpha} \\right )^\\beta} Parameters ---------- x", "- p**(1./mu)))**(1/beta) def mean(self, alpha, beta, mu): func = lambda", "distribution Returns ------- hf : scalar or numpy array The", "gumb = para.Gumbel.fit(log_x, c, n, how='MLE') if not gumb.res.success: gumb", "value(s) of the cumulative hazard rate at x. Examples --------", ": scalar or numpy array The value(s) of the reliability", "reliability) function for the ExpoWeibull Distribution: .. 
math:: R(x, X)", "(0, np.inf) self.plot_x_scale = 'log' self.y_ticks = [0.0001, 0.0002, 0.0003,", ".3, .4, .5]) >>> ExpoWeibull.qf(p, 3, 4, 1.2) array([1.89361341, 2.2261045", "alpha = np.exp(params[1]/-beta) elif rr == 'x': beta = 1./params[0]", "= uniform.rvs(size=size) return self.qf(U, alpha, beta, mu) def mpp_x_transform(self, x,", "1 - e^{-\\left ( \\frac{x}{\\alpha} \\right )^\\beta} \\right ]^{\\mu} Parameters", "array or scalar The percentiles at which the quantile will", "Distribution: .. math:: h(x) = \\frac{f(x)}{R(x)} Parameters ---------- x :", "None), (0, None), (0, None),) self.support = (0, np.inf) self.plot_x_scale", "\"\"\" return alpha * (-np.log(1 - p**(1./mu)))**(1/beta) def mean(self, alpha,", "== 'y': beta = params[0] alpha = np.exp(params[1]/-beta) elif rr", "lambda x : x * self.df(x, alpha, beta, mu) top", "array([0.02427515, 0.27589838, 0.53701385, 0.15943643, 0.00330058]) \"\"\" return (beta * mu", ".. math:: R(x) = 1 - \\left [ 1 -", "n, how='MLE') if not gumb.res.success: gumb = para.Gumbel.fit(log_x, c, n,", "shape parameter for the ExpoWeibull distribution Returns ------- Q :", "alpha, beta, mu) / self.sf(X, alpha, beta, mu) def df(self,", "for the ExpoWeibull Distribution: .. math:: R(x) = 1 -" ]
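A quick sanity check of the functions above; a minimal sketch, assuming an installed surpyval package exposes this class as `surpyval.ExpoWeibull` (as the docstring examples do). It only exercises identities that follow directly from the definitions in the file: ff and sf are complements, Hf is -log(sf), and qf inverts ff.

    import numpy as np
    from surpyval import ExpoWeibull

    x = np.array([1, 2, 3, 4, 5])
    p = np.array([.1, .2, .3, .4, .5])

    # F(x) + R(x) = 1 by construction.
    assert np.allclose(ExpoWeibull.ff(x, 3, 4, 1.2) + ExpoWeibull.sf(x, 3, 4, 1.2), 1)

    # H(x) = -ln(R(x)), exactly as Hf is implemented.
    assert np.allclose(ExpoWeibull.Hf(x, 3, 4, 1.2),
                       -np.log(ExpoWeibull.sf(x, 3, 4, 1.2)))

    # The quantile function inverts the CDF: F(q(p)) == p.
    assert np.allclose(ExpoWeibull.ff(ExpoWeibull.qf(p, 3, 4, 1.2), 3, 4, 1.2), p)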
[ "== 6 #----------------- with data_context(airquality) as _: qt = stats.quantile(f.Temp)", "iris) with pytest.raises(ValueError): table(warpbreaks.wool, iris) with pytest.raises(ValueError): table(iris.iloc[:, []]) with", "3) tab = table(b, exclude=\"B\") assert tab.shape == (1, 2)", "['A', 'B'] assert_iterable_equal(tab.values.flatten(), [9] * 6) #----------------- tab = table(state_division,", "with pytest.raises(ValueError): table(iris) with pytest.raises(ValueError): table(warpbreaks, iris) with pytest.raises(ValueError): table(warpbreaks.wool,", "24 #----------------- a = letters[:3] tab = table(a, sample(a)) assert", "\"cba\", dnn='x') assert tab.shape == (3,3) assert sum(tab.values.flatten()) == 3", ".conftest import assert_iterable_equal def test_table(): # https://www.rdocumentation.org/packages/base/versions/3.6.2/topics/table z = stats.rpois(100,", "assert_iterable_equal def test_table(): # https://www.rdocumentation.org/packages/base/versions/3.6.2/topics/table z = stats.rpois(100, 5) x", "f.Month) assert tab.iloc[0,0] == 24 #----------------- a = letters[:3] tab", "in tab.index def test_table_error(): from datar.datasets import iris, warpbreaks with", "= cut(f.Temp, qt) tab = table(ct, f.Month) assert tab.iloc[0,0] ==", "import assert_iterable_equal def test_table(): # https://www.rdocumentation.org/packages/base/versions/3.6.2/topics/table z = stats.rpois(100, 5)", "cut(f.Temp, qt) tab = table(ct, f.Month) assert tab.iloc[0,0] == 24", "qt = stats.quantile(f.Temp) ct = cut(f.Temp, qt) tab = table(ct,", "import pytest from datar import stats from datar.base import *", "def test_table(): # https://www.rdocumentation.org/packages/base/versions/3.6.2/topics/table z = stats.rpois(100, 5) x =", "= table(a, sample(a)) assert sum(tab.values.flatten()) == 3 #----------------- tab =", "import iris, warpbreaks with pytest.raises(ValueError): table(iris) with pytest.raises(ValueError): table(warpbreaks, iris)", "table(iris.iloc[:, [1]], iris, iris) with pytest.raises(ValueError): table(iris.iloc[:, [1]], iris.iloc[:, []])", "table(z) assert sum(x.values.flatten()) == 100 #----------------- with data_context(warpbreaks) as _:", "tab.shape == (4, 4) tab = table(\"abc\", \"cba\", dnn='x') assert", "(1.0/(i+1) for i in range(3))) a = a * 10", "100 #----------------- with data_context(warpbreaks) as _: tab = table(f.wool, f.tension)", "as _: qt = stats.quantile(f.Temp) ct = cut(f.Temp, qt) tab", "== 'y' #----------------- a = c(NA, Inf, (1.0/(i+1) for i", "# tab = table(a) # assert_iterable_equal(tab.values.flatten(), [10] * 4) tab", "= table(a, exclude=None) assert_iterable_equal(tab.values.flatten(), [10] * 5) #------------------ b =", "= factor(rep(c(\"A\",\"B\",\"C\"), 10), levels=c(\"A\",\"B\",\"C\",\"D\",\"E\")) tab = table(d, exclude=\"B\", dnn=['x']) assert_iterable_equal(tab.columns.to_list(),", "10)) tab = table(b) assert tab.shape == (1, 3) assert_iterable_equal(tab.values.flatten(),", "data_context(warpbreaks) as _: tab = table(f.wool, f.tension) assert tab.columns.tolist() ==", "with data_context(airquality) as _: qt = stats.quantile(f.Temp) ct = cut(f.Temp,", "exclude=\"B\") assert tab.shape == (1, 2) assert_iterable_equal(tab.values.flatten(), [10] * 2)", "2) assert_iterable_equal(tab.values.flatten(), [10] * 2) assert 'B' not in tab.columns", "warpbreaks, state_division, state_region, airquality from .conftest import assert_iterable_equal def test_table():", "https://www.rdocumentation.org/packages/base/versions/3.6.2/topics/table z = stats.rpois(100, 5) x = table(z) assert 
sum(x.values.flatten())", "assert tab.iloc[0,0] == 24 #----------------- a = letters[:3] tab =", "sample(a)) assert sum(tab.values.flatten()) == 3 #----------------- tab = table(a, sample(a),", "sum(tab.values.flatten()) == 3 with data_context(airquality) as _: tab = table(f.Ozone,", "tab.columns.name == 'y' #----------------- a = c(NA, Inf, (1.0/(i+1) for", "stats.quantile(f.Temp) ct = cut(f.Temp, qt) tab = table(ct, f.Month) assert", "0, 0]) d2 = factor(rep(c(\"A\",\"B\",\"C\"), 10), levels=c(\"A\",\"B\",\"C\",\"D\",\"E\")) tab = table(d,", "[\"A\", \"C\", \"D\", \"E\"]) assert_iterable_equal(tab.values.flatten(), [10, 10, 0, 0]) d2", "= table(b) assert tab.shape == (1, 3) assert_iterable_equal(tab.values.flatten(), [10] *", "from datar.base import * from datar import f from datar.datasets", "[10] * 4) tab = table(a, exclude=None) assert_iterable_equal(tab.values.flatten(), [10] *", "\"E\"]) assert_iterable_equal(tab.values.flatten(), [10, 10, 0, 0]) d2 = factor(rep(c(\"A\",\"B\",\"C\"), 10),", "from .conftest import assert_iterable_equal def test_table(): # https://www.rdocumentation.org/packages/base/versions/3.6.2/topics/table z =", "x = table(z) assert sum(x.values.flatten()) == 100 #----------------- with data_context(warpbreaks)", "d2 = factor(rep(c(\"A\",\"B\",\"C\"), 10), levels=c(\"A\",\"B\",\"C\",\"D\",\"E\")) tab = table(d, d2, exclude=\"B\")", "== (4, 4) tab = table(\"abc\", \"cba\", dnn='x') assert tab.shape", "= table(f.wool, f.tension) assert tab.columns.tolist() == ['H', 'L', 'M'] assert", "ct = cut(f.Temp, qt) tab = table(ct, f.Month) assert tab.iloc[0,0]", "b = as_factor(rep(c(\"A\",\"B\",\"C\"), 10)) tab = table(b) assert tab.shape ==", "'<NA>' in tab.index def test_table_error(): from datar.datasets import iris, warpbreaks", "table(iris.iloc[:, [1,2]], iris) with pytest.raises(ValueError): table(iris.iloc[:, [1]], iris, iris) with", "[9] * 6) tab = table(warpbreaks.loc[:, ['wool', 'tension']]) assert tab.columns.tolist()", "import stats from datar.base import * from datar import f", "['A', 'B'] assert_iterable_equal(tab.values.flatten(), [9] * 6) tab = table(warpbreaks.loc[:, ['wool',", "assert tab.loc['New England', 'Northeast'] == 6 #----------------- with data_context(airquality) as", "a = letters[:3] tab = table(a, sample(a)) assert sum(tab.values.flatten()) ==", "= table(z) assert sum(x.values.flatten()) == 100 #----------------- with data_context(warpbreaks) as", "table(a, sample(a)) assert sum(tab.values.flatten()) == 3 #----------------- tab = table(a,", "tab.shape == (1, 3) assert_iterable_equal(tab.values.flatten(), [10] * 3) tab =", "assert_iterable_equal(tab.values.flatten(), [10] * 2) assert 'B' not in tab.columns #-------------------", "assert_iterable_equal(tab.values.flatten(), [10, 10, 0, 0]) d2 = factor(rep(c(\"A\",\"B\",\"C\"), 10), levels=c(\"A\",\"B\",\"C\",\"D\",\"E\"))", "10, 0, 0]) d2 = factor(rep(c(\"A\",\"B\",\"C\"), 10), levels=c(\"A\",\"B\",\"C\",\"D\",\"E\")) tab =", "table(d, d2, exclude=\"B\") assert tab.shape == (4, 4) tab =", "3 with data_context(airquality) as _: tab = table(f.Ozone, f.Solar_R, exclude=None)", "pytest.raises(ValueError): table(iris) with pytest.raises(ValueError): table(warpbreaks, iris) with pytest.raises(ValueError): table(warpbreaks.wool, iris)", "table(warpbreaks, iris) with pytest.raises(ValueError): table(warpbreaks.wool, iris) with pytest.raises(ValueError): table(iris.iloc[:, []])", "tab = table(f.wool, f.tension) assert tab.columns.tolist() == ['H', 'L', 'M']", "tab.index def test_table_error(): from 
datar.datasets import iris, warpbreaks with pytest.raises(ValueError):", "== ['A', 'B'] assert_iterable_equal(tab.values.flatten(), [9] * 6) #----------------- tab =", "= table(d, d2, exclude=\"B\") assert tab.shape == (4, 4) tab", "= c(NA, Inf, (1.0/(i+1) for i in range(3))) a =", "assert_iterable_equal(tab.values.flatten(), [10] * 5) #------------------ b = as_factor(rep(c(\"A\",\"B\",\"C\"), 10)) tab", "assert tab.shape == (1, 2) assert_iterable_equal(tab.values.flatten(), [10] * 2) assert", "'B' not in tab.columns #------------------- d = factor(rep(c(\"A\",\"B\",\"C\"), 10), levels=c(\"A\",\"B\",\"C\",\"D\",\"E\"))", "assert_iterable_equal(tab.values.flatten(), [9] * 6) tab = table(warpbreaks.loc[:, ['wool', 'tension']]) assert", "d2, exclude=\"B\") assert tab.shape == (4, 4) tab = table(\"abc\",", "state_region) assert tab.loc['New England', 'Northeast'] == 6 #----------------- with data_context(airquality)", "assert tab.columns.tolist() == ['H', 'L', 'M'] assert tab.index.tolist() == ['A',", "#----------------- with data_context(warpbreaks) as _: tab = table(f.wool, f.tension) assert", "d = factor(rep(c(\"A\",\"B\",\"C\"), 10), levels=c(\"A\",\"B\",\"C\",\"D\",\"E\")) tab = table(d, exclude=\"B\", dnn=['x'])", "tab.index.tolist() == ['A', 'B'] assert_iterable_equal(tab.values.flatten(), [9] * 6) #----------------- tab", "= a * 10 # tab = table(a) # assert_iterable_equal(tab.values.flatten(),", "[10] * 5) #------------------ b = as_factor(rep(c(\"A\",\"B\",\"C\"), 10)) tab =", "3 #----------------- tab = table(a, sample(a), dnn=['x', 'y']) assert tab.index.name", "levels=c(\"A\",\"B\",\"C\",\"D\",\"E\")) tab = table(d, exclude=\"B\", dnn=['x']) assert_iterable_equal(tab.columns.to_list(), [\"A\", \"C\", \"D\",", "from datar import f from datar.datasets import warpbreaks, state_division, state_region,", "stats from datar.base import * from datar import f from", "10 # tab = table(a) # assert_iterable_equal(tab.values.flatten(), [10] * 4)", "tab = table(d, d2, exclude=\"B\") assert tab.shape == (4, 4)", "4) tab = table(a, exclude=None) assert_iterable_equal(tab.values.flatten(), [10] * 5) #------------------", "6) #----------------- tab = table(state_division, state_region) assert tab.loc['New England', 'Northeast']", "assert_iterable_equal(tab.values.flatten(), [10] * 4) tab = table(a, exclude=None) assert_iterable_equal(tab.values.flatten(), [10]", "* 4) tab = table(a, exclude=None) assert_iterable_equal(tab.values.flatten(), [10] * 5)", "table(b, exclude=\"B\") assert tab.shape == (1, 2) assert_iterable_equal(tab.values.flatten(), [10] *", "from datar.datasets import warpbreaks, state_division, state_region, airquality from .conftest import", "tab = table(warpbreaks.loc[:, ['wool', 'tension']]) assert tab.columns.tolist() == ['H', 'L',", "'y']) assert tab.index.name == 'x' assert tab.columns.name == 'y' #-----------------", "iris) with pytest.raises(ValueError): table(iris.iloc[:, []]) with pytest.raises(ValueError): table(iris.iloc[:, [1,2]], iris)", "= table(warpbreaks.loc[:, ['wool', 'tension']]) assert tab.columns.tolist() == ['H', 'L', 'M']", "a = c(NA, Inf, (1.0/(i+1) for i in range(3))) a", "== ['A', 'B'] assert_iterable_equal(tab.values.flatten(), [9] * 6) tab = table(warpbreaks.loc[:,", "'B'] assert_iterable_equal(tab.values.flatten(), [9] * 6) tab = table(warpbreaks.loc[:, ['wool', 'tension']])", "exclude=None) assert_iterable_equal(tab.values.flatten(), [10] * 5) #------------------ b = as_factor(rep(c(\"A\",\"B\",\"C\"), 10))", "from datar import stats from datar.base 
import * from datar", "[10] * 3) tab = table(b, exclude=\"B\") assert tab.shape ==", "table(a) # assert_iterable_equal(tab.values.flatten(), [10] * 4) tab = table(a, exclude=None)", "\"C\", \"D\", \"E\"]) assert_iterable_equal(tab.values.flatten(), [10, 10, 0, 0]) d2 =", "'tension']]) assert tab.columns.tolist() == ['H', 'L', 'M'] assert tab.index.tolist() ==", "\"D\", \"E\"]) assert_iterable_equal(tab.values.flatten(), [10, 10, 0, 0]) d2 = factor(rep(c(\"A\",\"B\",\"C\"),", "datar import stats from datar.base import * from datar import", "tab.iloc[0,0] == 24 #----------------- a = letters[:3] tab = table(a,", "#------------------ b = as_factor(rep(c(\"A\",\"B\",\"C\"), 10)) tab = table(b) assert tab.shape", "tab.index.tolist() == ['A', 'B'] assert_iterable_equal(tab.values.flatten(), [9] * 6) tab =", "= table(a, sample(a), dnn=['x', 'y']) assert tab.index.name == 'x' assert", "tab = table(b, exclude=\"B\") assert tab.shape == (1, 2) assert_iterable_equal(tab.values.flatten(),", "dnn=['x', 'y']) assert tab.index.name == 'x' assert tab.columns.name == 'y'", "5) x = table(z) assert sum(x.values.flatten()) == 100 #----------------- with", "#----------------- a = c(NA, Inf, (1.0/(i+1) for i in range(3)))", "table(f.Ozone, f.Solar_R, exclude=None) assert '<NA>' in tab.columns assert '<NA>' in", "a * 10 # tab = table(a) # assert_iterable_equal(tab.values.flatten(), [10]", "from datar.datasets import iris, warpbreaks with pytest.raises(ValueError): table(iris) with pytest.raises(ValueError):", "assert_iterable_equal(tab.values.flatten(), [9] * 6) #----------------- tab = table(state_division, state_region) assert", "with pytest.raises(ValueError): table(warpbreaks, iris) with pytest.raises(ValueError): table(warpbreaks.wool, iris) with pytest.raises(ValueError):", "as _: tab = table(f.Ozone, f.Solar_R, exclude=None) assert '<NA>' in", "pytest from datar import stats from datar.base import * from", "= table(ct, f.Month) assert tab.iloc[0,0] == 24 #----------------- a =", "table(iris.iloc[:, []]) with pytest.raises(ValueError): table(iris.iloc[:, [1,2]], iris) with pytest.raises(ValueError): table(iris.iloc[:,", "[1,2]], iris) with pytest.raises(ValueError): table(iris.iloc[:, [1]], iris, iris) with pytest.raises(ValueError):", "datar.datasets import warpbreaks, state_division, state_region, airquality from .conftest import assert_iterable_equal", "'M'] assert tab.index.tolist() == ['A', 'B'] assert_iterable_equal(tab.values.flatten(), [9] * 6)", "in tab.columns #------------------- d = factor(rep(c(\"A\",\"B\",\"C\"), 10), levels=c(\"A\",\"B\",\"C\",\"D\",\"E\")) tab =", "assert_iterable_equal(tab.columns.to_list(), [\"A\", \"C\", \"D\", \"E\"]) assert_iterable_equal(tab.values.flatten(), [10, 10, 0, 0])", "datar.base import * from datar import f from datar.datasets import", "f.tension) assert tab.columns.tolist() == ['H', 'L', 'M'] assert tab.index.tolist() ==", "assert sum(x.values.flatten()) == 100 #----------------- with data_context(warpbreaks) as _: tab", "* 10 # tab = table(a) # assert_iterable_equal(tab.values.flatten(), [10] *", "= table(a) # assert_iterable_equal(tab.values.flatten(), [10] * 4) tab = table(a,", "_: qt = stats.quantile(f.Temp) ct = cut(f.Temp, qt) tab =", "table(a, exclude=None) assert_iterable_equal(tab.values.flatten(), [10] * 5) #------------------ b = as_factor(rep(c(\"A\",\"B\",\"C\"),", "[10, 10, 0, 0]) d2 = factor(rep(c(\"A\",\"B\",\"C\"), 10), levels=c(\"A\",\"B\",\"C\",\"D\",\"E\")) tab", "== (3,3) assert sum(tab.values.flatten()) == 3 with 
data_context(airquality) as _:", "tab.loc['New England', 'Northeast'] == 6 #----------------- with data_context(airquality) as _:", "tab.shape == (1, 2) assert_iterable_equal(tab.values.flatten(), [10] * 2) assert 'B'", "(4, 4) tab = table(\"abc\", \"cba\", dnn='x') assert tab.shape ==", "assert tab.columns.name == 'y' #----------------- a = c(NA, Inf, (1.0/(i+1)", "with data_context(airquality) as _: tab = table(f.Ozone, f.Solar_R, exclude=None) assert", "10), levels=c(\"A\",\"B\",\"C\",\"D\",\"E\")) tab = table(d, d2, exclude=\"B\") assert tab.shape ==", "# assert_iterable_equal(tab.values.flatten(), [10] * 4) tab = table(a, exclude=None) assert_iterable_equal(tab.values.flatten(),", "6 #----------------- with data_context(airquality) as _: qt = stats.quantile(f.Temp) ct", "tab = table(a, sample(a)) assert sum(tab.values.flatten()) == 3 #----------------- tab", "letters[:3] tab = table(a, sample(a)) assert sum(tab.values.flatten()) == 3 #-----------------", "#----------------- tab = table(a, sample(a), dnn=['x', 'y']) assert tab.index.name ==", "tab = table(\"abc\", \"cba\", dnn='x') assert tab.shape == (3,3) assert", "qt) tab = table(ct, f.Month) assert tab.iloc[0,0] == 24 #-----------------", "'<NA>' in tab.columns assert '<NA>' in tab.index def test_table_error(): from", "table(iris) with pytest.raises(ValueError): table(warpbreaks, iris) with pytest.raises(ValueError): table(warpbreaks.wool, iris) with", "== 3 #----------------- tab = table(a, sample(a), dnn=['x', 'y']) assert", "table(state_division, state_region) assert tab.loc['New England', 'Northeast'] == 6 #----------------- with", "'y' #----------------- a = c(NA, Inf, (1.0/(i+1) for i in", "exclude=\"B\", dnn=['x']) assert_iterable_equal(tab.columns.to_list(), [\"A\", \"C\", \"D\", \"E\"]) assert_iterable_equal(tab.values.flatten(), [10, 10,", "dnn=['x']) assert_iterable_equal(tab.columns.to_list(), [\"A\", \"C\", \"D\", \"E\"]) assert_iterable_equal(tab.values.flatten(), [10, 10, 0,", "tab = table(state_division, state_region) assert tab.loc['New England', 'Northeast'] == 6", "* 2) assert 'B' not in tab.columns #------------------- d =", "tab = table(b) assert tab.shape == (1, 3) assert_iterable_equal(tab.values.flatten(), [10]", "assert sum(tab.values.flatten()) == 3 with data_context(airquality) as _: tab =", "pytest.raises(ValueError): table(iris.iloc[:, []]) with pytest.raises(ValueError): table(iris.iloc[:, [1,2]], iris) with pytest.raises(ValueError):", "table(a, sample(a), dnn=['x', 'y']) assert tab.index.name == 'x' assert tab.columns.name", "[]]) with pytest.raises(ValueError): table(iris.iloc[:, [1,2]], iris) with pytest.raises(ValueError): table(iris.iloc[:, [1]],", "assert_iterable_equal(tab.values.flatten(), [10] * 3) tab = table(b, exclude=\"B\") assert tab.shape", "== (1, 2) assert_iterable_equal(tab.values.flatten(), [10] * 2) assert 'B' not", "<filename>tests/test_base_table.py<gh_stars>100-1000 import pytest from datar import stats from datar.base import", "= stats.rpois(100, 5) x = table(z) assert sum(x.values.flatten()) == 100", "_: tab = table(f.wool, f.tension) assert tab.columns.tolist() == ['H', 'L',", "* 6) #----------------- tab = table(state_division, state_region) assert tab.loc['New England',", "not in tab.columns #------------------- d = factor(rep(c(\"A\",\"B\",\"C\"), 10), levels=c(\"A\",\"B\",\"C\",\"D\",\"E\")) tab", "* 3) tab = table(b, exclude=\"B\") assert tab.shape == (1,", "import f from datar.datasets import warpbreaks, state_division, state_region, airquality from", "as _: tab = 
table(f.wool, f.tension) assert tab.columns.tolist() == ['H',", "i in range(3))) a = a * 10 # tab", "table(\"abc\", \"cba\", dnn='x') assert tab.shape == (3,3) assert sum(tab.values.flatten()) ==", "* from datar import f from datar.datasets import warpbreaks, state_division,", "warpbreaks with pytest.raises(ValueError): table(iris) with pytest.raises(ValueError): table(warpbreaks, iris) with pytest.raises(ValueError):", "assert 'B' not in tab.columns #------------------- d = factor(rep(c(\"A\",\"B\",\"C\"), 10),", "datar import f from datar.datasets import warpbreaks, state_division, state_region, airquality", "tab = table(ct, f.Month) assert tab.iloc[0,0] == 24 #----------------- a", "iris) with pytest.raises(ValueError): table(iris.iloc[:, [1]], iris, iris) with pytest.raises(ValueError): table(iris.iloc[:,", "tab = table(f.Ozone, f.Solar_R, exclude=None) assert '<NA>' in tab.columns assert", "pytest.raises(ValueError): table(warpbreaks, iris) with pytest.raises(ValueError): table(warpbreaks.wool, iris) with pytest.raises(ValueError): table(iris.iloc[:,", "assert tab.shape == (3,3) assert sum(tab.values.flatten()) == 3 with data_context(airquality)", "sum(tab.values.flatten()) == 3 #----------------- tab = table(a, sample(a), dnn=['x', 'y'])", "#----------------- tab = table(state_division, state_region) assert tab.loc['New England', 'Northeast'] ==", "tab.index.name == 'x' assert tab.columns.name == 'y' #----------------- a =", "5) #------------------ b = as_factor(rep(c(\"A\",\"B\",\"C\"), 10)) tab = table(b) assert", "in tab.columns assert '<NA>' in tab.index def test_table_error(): from datar.datasets", "with pytest.raises(ValueError): table(warpbreaks.wool, iris) with pytest.raises(ValueError): table(iris.iloc[:, []]) with pytest.raises(ValueError):", "def test_table_error(): from datar.datasets import iris, warpbreaks with pytest.raises(ValueError): table(iris)", "assert tab.shape == (1, 3) assert_iterable_equal(tab.values.flatten(), [10] * 3) tab", "0]) d2 = factor(rep(c(\"A\",\"B\",\"C\"), 10), levels=c(\"A\",\"B\",\"C\",\"D\",\"E\")) tab = table(d, d2,", "Inf, (1.0/(i+1) for i in range(3))) a = a *", "factor(rep(c(\"A\",\"B\",\"C\"), 10), levels=c(\"A\",\"B\",\"C\",\"D\",\"E\")) tab = table(d, exclude=\"B\", dnn=['x']) assert_iterable_equal(tab.columns.to_list(), [\"A\",", "tab.shape == (3,3) assert sum(tab.values.flatten()) == 3 with data_context(airquality) as", "tab = table(a, exclude=None) assert_iterable_equal(tab.values.flatten(), [10] * 5) #------------------ b", "* 5) #------------------ b = as_factor(rep(c(\"A\",\"B\",\"C\"), 10)) tab = table(b)", "assert tab.index.tolist() == ['A', 'B'] assert_iterable_equal(tab.values.flatten(), [9] * 6) tab", "== (1, 3) assert_iterable_equal(tab.values.flatten(), [10] * 3) tab = table(b,", "f.Solar_R, exclude=None) assert '<NA>' in tab.columns assert '<NA>' in tab.index", "== ['H', 'L', 'M'] assert tab.index.tolist() == ['A', 'B'] assert_iterable_equal(tab.values.flatten(),", "pytest.raises(ValueError): table(iris.iloc[:, [1]], iris, iris) with pytest.raises(ValueError): table(iris.iloc[:, [1]], iris.iloc[:,", "a = a * 10 # tab = table(a) #", "exclude=None) assert '<NA>' in tab.columns assert '<NA>' in tab.index def", "(1, 3) assert_iterable_equal(tab.values.flatten(), [10] * 3) tab = table(b, exclude=\"B\")", "'Northeast'] == 6 #----------------- with data_context(airquality) as _: qt =", "range(3))) a = a * 10 # tab = table(a)", "#------------------- d = factor(rep(c(\"A\",\"B\",\"C\"), 10), levels=c(\"A\",\"B\",\"C\",\"D\",\"E\")) 
tab = table(d, exclude=\"B\",", "iris, warpbreaks with pytest.raises(ValueError): table(iris) with pytest.raises(ValueError): table(warpbreaks, iris) with", "(1, 2) assert_iterable_equal(tab.values.flatten(), [10] * 2) assert 'B' not in", "data_context(airquality) as _: qt = stats.quantile(f.Temp) ct = cut(f.Temp, qt)", "[10] * 2) assert 'B' not in tab.columns #------------------- d", "sample(a), dnn=['x', 'y']) assert tab.index.name == 'x' assert tab.columns.name ==", "datar.datasets import iris, warpbreaks with pytest.raises(ValueError): table(iris) with pytest.raises(ValueError): table(warpbreaks,", "'B'] assert_iterable_equal(tab.values.flatten(), [9] * 6) #----------------- tab = table(state_division, state_region)", "table(warpbreaks.loc[:, ['wool', 'tension']]) assert tab.columns.tolist() == ['H', 'L', 'M'] assert", "for i in range(3))) a = a * 10 #", "[9] * 6) #----------------- tab = table(state_division, state_region) assert tab.loc['New", "data_context(airquality) as _: tab = table(f.Ozone, f.Solar_R, exclude=None) assert '<NA>'", "with pytest.raises(ValueError): table(iris.iloc[:, [1,2]], iris) with pytest.raises(ValueError): table(iris.iloc[:, [1]], iris,", "state_division, state_region, airquality from .conftest import assert_iterable_equal def test_table(): #", "factor(rep(c(\"A\",\"B\",\"C\"), 10), levels=c(\"A\",\"B\",\"C\",\"D\",\"E\")) tab = table(d, d2, exclude=\"B\") assert tab.shape", "with pytest.raises(ValueError): table(iris.iloc[:, []]) with pytest.raises(ValueError): table(iris.iloc[:, [1,2]], iris) with", "in range(3))) a = a * 10 # tab =", "= as_factor(rep(c(\"A\",\"B\",\"C\"), 10)) tab = table(b) assert tab.shape == (1,", "z = stats.rpois(100, 5) x = table(z) assert sum(x.values.flatten()) ==", "3) assert_iterable_equal(tab.values.flatten(), [10] * 3) tab = table(b, exclude=\"B\") assert", "import warpbreaks, state_division, state_region, airquality from .conftest import assert_iterable_equal def", "f from datar.datasets import warpbreaks, state_division, state_region, airquality from .conftest", "# https://www.rdocumentation.org/packages/base/versions/3.6.2/topics/table z = stats.rpois(100, 5) x = table(z) assert", "['wool', 'tension']]) assert tab.columns.tolist() == ['H', 'L', 'M'] assert tab.index.tolist()", "state_region, airquality from .conftest import assert_iterable_equal def test_table(): # https://www.rdocumentation.org/packages/base/versions/3.6.2/topics/table", "assert tab.index.tolist() == ['A', 'B'] assert_iterable_equal(tab.values.flatten(), [9] * 6) #-----------------", "tab.columns.tolist() == ['H', 'L', 'M'] assert tab.index.tolist() == ['A', 'B']", "== 'x' assert tab.columns.name == 'y' #----------------- a = c(NA,", "== 100 #----------------- with data_context(warpbreaks) as _: tab = table(f.wool,", "tab = table(d, exclude=\"B\", dnn=['x']) assert_iterable_equal(tab.columns.to_list(), [\"A\", \"C\", \"D\", \"E\"])", "exclude=\"B\") assert tab.shape == (4, 4) tab = table(\"abc\", \"cba\",", "tab.columns assert '<NA>' in tab.index def test_table_error(): from datar.datasets import", "pytest.raises(ValueError): table(warpbreaks.wool, iris) with pytest.raises(ValueError): table(iris.iloc[:, []]) with pytest.raises(ValueError): table(iris.iloc[:,", "== 24 #----------------- a = letters[:3] tab = table(a, sample(a))", "_: tab = table(f.Ozone, f.Solar_R, exclude=None) assert '<NA>' in tab.columns", "import * from datar import f from datar.datasets import warpbreaks,", "assert tab.index.name == 'x' assert tab.columns.name == 'y' #----------------- 
a", "dnn='x') assert tab.shape == (3,3) assert sum(tab.values.flatten()) == 3 with", "tab = table(a, sample(a), dnn=['x', 'y']) assert tab.index.name == 'x'", "= stats.quantile(f.Temp) ct = cut(f.Temp, qt) tab = table(ct, f.Month)", "* 6) tab = table(warpbreaks.loc[:, ['wool', 'tension']]) assert tab.columns.tolist() ==", "table(f.wool, f.tension) assert tab.columns.tolist() == ['H', 'L', 'M'] assert tab.index.tolist()", "stats.rpois(100, 5) x = table(z) assert sum(x.values.flatten()) == 100 #-----------------", "tab = table(a) # assert_iterable_equal(tab.values.flatten(), [10] * 4) tab =", "assert '<NA>' in tab.columns assert '<NA>' in tab.index def test_table_error():", "pytest.raises(ValueError): table(iris.iloc[:, [1,2]], iris) with pytest.raises(ValueError): table(iris.iloc[:, [1]], iris, iris)", "= table(b, exclude=\"B\") assert tab.shape == (1, 2) assert_iterable_equal(tab.values.flatten(), [10]", "airquality from .conftest import assert_iterable_equal def test_table(): # https://www.rdocumentation.org/packages/base/versions/3.6.2/topics/table z", "test_table(): # https://www.rdocumentation.org/packages/base/versions/3.6.2/topics/table z = stats.rpois(100, 5) x = table(z)", "= table(d, exclude=\"B\", dnn=['x']) assert_iterable_equal(tab.columns.to_list(), [\"A\", \"C\", \"D\", \"E\"]) assert_iterable_equal(tab.values.flatten(),", "levels=c(\"A\",\"B\",\"C\",\"D\",\"E\")) tab = table(d, d2, exclude=\"B\") assert tab.shape == (4,", "test_table_error(): from datar.datasets import iris, warpbreaks with pytest.raises(ValueError): table(iris) with", "'L', 'M'] assert tab.index.tolist() == ['A', 'B'] assert_iterable_equal(tab.values.flatten(), [9] *", "= table(\"abc\", \"cba\", dnn='x') assert tab.shape == (3,3) assert sum(tab.values.flatten())", "table(warpbreaks.wool, iris) with pytest.raises(ValueError): table(iris.iloc[:, []]) with pytest.raises(ValueError): table(iris.iloc[:, [1,2]],", "c(NA, Inf, (1.0/(i+1) for i in range(3))) a = a", "'x' assert tab.columns.name == 'y' #----------------- a = c(NA, Inf,", "with pytest.raises(ValueError): table(iris.iloc[:, [1]], iris, iris) with pytest.raises(ValueError): table(iris.iloc[:, [1]],", "table(b) assert tab.shape == (1, 3) assert_iterable_equal(tab.values.flatten(), [10] * 3)", "= factor(rep(c(\"A\",\"B\",\"C\"), 10), levels=c(\"A\",\"B\",\"C\",\"D\",\"E\")) tab = table(d, d2, exclude=\"B\") assert", "assert sum(tab.values.flatten()) == 3 #----------------- tab = table(a, sample(a), dnn=['x',", "10), levels=c(\"A\",\"B\",\"C\",\"D\",\"E\")) tab = table(d, exclude=\"B\", dnn=['x']) assert_iterable_equal(tab.columns.to_list(), [\"A\", \"C\",", "assert tab.shape == (4, 4) tab = table(\"abc\", \"cba\", dnn='x')", "table(ct, f.Month) assert tab.iloc[0,0] == 24 #----------------- a = letters[:3]", "#----------------- with data_context(airquality) as _: qt = stats.quantile(f.Temp) ct =", "2) assert 'B' not in tab.columns #------------------- d = factor(rep(c(\"A\",\"B\",\"C\"),", "= table(f.Ozone, f.Solar_R, exclude=None) assert '<NA>' in tab.columns assert '<NA>'", "assert '<NA>' in tab.index def test_table_error(): from datar.datasets import iris,", "6) tab = table(warpbreaks.loc[:, ['wool', 'tension']]) assert tab.columns.tolist() == ['H',", "= letters[:3] tab = table(a, sample(a)) assert sum(tab.values.flatten()) == 3", "4) tab = table(\"abc\", \"cba\", dnn='x') assert tab.shape == (3,3)", "#----------------- a = letters[:3] tab = table(a, sample(a)) assert sum(tab.values.flatten())", "tab.columns #------------------- d = 
factor(rep(c(\"A\",\"B\",\"C\"), 10), levels=c(\"A\",\"B\",\"C\",\"D\",\"E\")) tab = table(d,", "England', 'Northeast'] == 6 #----------------- with data_context(airquality) as _: qt", "as_factor(rep(c(\"A\",\"B\",\"C\"), 10)) tab = table(b) assert tab.shape == (1, 3)", "sum(x.values.flatten()) == 100 #----------------- with data_context(warpbreaks) as _: tab =", "= table(state_division, state_region) assert tab.loc['New England', 'Northeast'] == 6 #-----------------", "['H', 'L', 'M'] assert tab.index.tolist() == ['A', 'B'] assert_iterable_equal(tab.values.flatten(), [9]", "with data_context(warpbreaks) as _: tab = table(f.wool, f.tension) assert tab.columns.tolist()", "table(d, exclude=\"B\", dnn=['x']) assert_iterable_equal(tab.columns.to_list(), [\"A\", \"C\", \"D\", \"E\"]) assert_iterable_equal(tab.values.flatten(), [10,", "(3,3) assert sum(tab.values.flatten()) == 3 with data_context(airquality) as _: tab", "== 3 with data_context(airquality) as _: tab = table(f.Ozone, f.Solar_R," ]
[ "import * class UpdateStatementTests(TestCase): def test_table_rendering(self): \"\"\" tests that fields", "UpdateStatement, WhereClause, AssignmentClause from cqlengine.operators import * class UpdateStatementTests(TestCase): def", "self.assertEqual(us.get_context(), {'0': 'b', '1': 'd', '2': 'x'}) def test_context_update(self): us", "us.add_assignment_clause(AssignmentClause('c', 'd')) us.add_where_clause(WhereClause('a', EqualsOperator(), 'x')) us.update_context_id(3) self.assertEqual(unicode(us), 'UPDATE table SET", "select statement \"\"\" us = UpdateStatement('table') self.assertTrue(unicode(us).startswith('UPDATE table SET'), unicode(us))", "= UpdateStatement('table') us.add_assignment_clause(AssignmentClause('a', 'b')) us.add_assignment_clause(AssignmentClause('c', 'd')) us.add_where_clause(WhereClause('a', EqualsOperator(), 'x')) us.update_context_id(3)", "{'0': 'b', '1': 'd', '2': 'x'}) def test_context_update(self): us =", "'UPDATE table SET \"a\" = :4, \"c\" = :5 WHERE", "UpdateStatement('table') us.add_assignment_clause(AssignmentClause('a', 'b')) us.add_assignment_clause(AssignmentClause('c', 'd')) us.add_where_clause(WhereClause('a', EqualsOperator(), 'x')) us.update_context_id(3) self.assertEqual(unicode(us),", "test_context(self): us = UpdateStatement('table') us.add_assignment_clause(AssignmentClause('a', 'b')) us.add_assignment_clause(AssignmentClause('c', 'd')) us.add_where_clause(WhereClause('a', EqualsOperator(),", "'d')) us.add_where_clause(WhereClause('a', EqualsOperator(), 'x')) self.assertEqual(us.get_context(), {'0': 'b', '1': 'd', '2':", "= :1 WHERE \"a\" = :2', unicode(us)) def test_context(self): us", "'b', '1': 'd', '2': 'x'}) def test_context_update(self): us = UpdateStatement('table')", "= UpdateStatement('table') us.add_assignment_clause(AssignmentClause('a', 'b')) us.add_assignment_clause(AssignmentClause('c', 'd')) us.add_where_clause(WhereClause('a', EqualsOperator(), 'x')) self.assertEqual(us.get_context(),", ":4, \"c\" = :5 WHERE \"a\" = :3') self.assertEqual(us.get_context(), {'4':", "UpdateStatement('table') us.add_assignment_clause(AssignmentClause('a', 'b')) us.add_assignment_clause(AssignmentClause('c', 'd')) us.add_where_clause(WhereClause('a', EqualsOperator(), 'x')) self.assertEqual(unicode(us), 'UPDATE", "'x'}) def test_additional_rendering(self): us = UpdateStatement('table', ttl=60) us.add_assignment_clause(AssignmentClause('a', 'b')) us.add_where_clause(WhereClause('a',", "UpdateStatement('table', ttl=60) us.add_assignment_clause(AssignmentClause('a', 'b')) us.add_where_clause(WhereClause('a', EqualsOperator(), 'x')) self.assertIn('USING TTL 60',", "import UpdateStatement, WhereClause, AssignmentClause from cqlengine.operators import * class UpdateStatementTests(TestCase):", "WhereClause, AssignmentClause from cqlengine.operators import * class UpdateStatementTests(TestCase): def test_table_rendering(self):", "properly added to the select statement \"\"\" us = UpdateStatement('table')", "class UpdateStatementTests(TestCase): def test_table_rendering(self): \"\"\" tests that fields are properly", "'5': 'd', '3': 'x'}) def test_additional_rendering(self): us = UpdateStatement('table', ttl=60)", "self.assertTrue(unicode(us).startswith('UPDATE table SET'), unicode(us)) self.assertTrue(str(us).startswith('UPDATE table SET'), str(us)) def test_rendering(self):", "'b')) us.add_assignment_clause(AssignmentClause('c', 'd')) us.add_where_clause(WhereClause('a', EqualsOperator(), 'x')) self.assertEqual(us.get_context(), {'0': 'b', '1':", ":3') 
self.assertEqual(us.get_context(), {'4': 'b', '5': 'd', '3': 'x'}) def test_additional_rendering(self):", "\"a\" = :0, \"c\" = :1 WHERE \"a\" = :2',", ":1 WHERE \"a\" = :2', unicode(us)) def test_context(self): us =", "SET'), str(us)) def test_rendering(self): us = UpdateStatement('table') us.add_assignment_clause(AssignmentClause('a', 'b')) us.add_assignment_clause(AssignmentClause('c',", "ttl=60) us.add_assignment_clause(AssignmentClause('a', 'b')) us.add_where_clause(WhereClause('a', EqualsOperator(), 'x')) self.assertIn('USING TTL 60', unicode(us))", "\"a\" = :4, \"c\" = :5 WHERE \"a\" = :3')", "= UpdateStatement('table') self.assertTrue(unicode(us).startswith('UPDATE table SET'), unicode(us)) self.assertTrue(str(us).startswith('UPDATE table SET'), str(us))", "'2': 'x'}) def test_context_update(self): us = UpdateStatement('table') us.add_assignment_clause(AssignmentClause('a', 'b')) us.add_assignment_clause(AssignmentClause('c',", "us.add_where_clause(WhereClause('a', EqualsOperator(), 'x')) self.assertEqual(unicode(us), 'UPDATE table SET \"a\" = :0,", "to the select statement \"\"\" us = UpdateStatement('table') self.assertTrue(unicode(us).startswith('UPDATE table", "* class UpdateStatementTests(TestCase): def test_table_rendering(self): \"\"\" tests that fields are", "'b', '5': 'd', '3': 'x'}) def test_additional_rendering(self): us = UpdateStatement('table',", "def test_context(self): us = UpdateStatement('table') us.add_assignment_clause(AssignmentClause('a', 'b')) us.add_assignment_clause(AssignmentClause('c', 'd')) us.add_where_clause(WhereClause('a',", "'x'}) def test_context_update(self): us = UpdateStatement('table') us.add_assignment_clause(AssignmentClause('a', 'b')) us.add_assignment_clause(AssignmentClause('c', 'd'))", "UpdateStatement('table') self.assertTrue(unicode(us).startswith('UPDATE table SET'), unicode(us)) self.assertTrue(str(us).startswith('UPDATE table SET'), str(us)) def", "unittest import TestCase from cqlengine.statements import UpdateStatement, WhereClause, AssignmentClause from", "'d')) us.add_where_clause(WhereClause('a', EqualsOperator(), 'x')) self.assertEqual(unicode(us), 'UPDATE table SET \"a\" =", "UpdateStatement('table') us.add_assignment_clause(AssignmentClause('a', 'b')) us.add_assignment_clause(AssignmentClause('c', 'd')) us.add_where_clause(WhereClause('a', EqualsOperator(), 'x')) self.assertEqual(us.get_context(), {'0':", "\"a\" = :2', unicode(us)) def test_context(self): us = UpdateStatement('table') us.add_assignment_clause(AssignmentClause('a',", "\"\"\" us = UpdateStatement('table') self.assertTrue(unicode(us).startswith('UPDATE table SET'), unicode(us)) self.assertTrue(str(us).startswith('UPDATE table", "self.assertEqual(unicode(us), 'UPDATE table SET \"a\" = :4, \"c\" = :5", "UpdateStatementTests(TestCase): def test_table_rendering(self): \"\"\" tests that fields are properly added", ":5 WHERE \"a\" = :3') self.assertEqual(us.get_context(), {'4': 'b', '5': 'd',", "= :4, \"c\" = :5 WHERE \"a\" = :3') self.assertEqual(us.get_context(),", "us.add_assignment_clause(AssignmentClause('c', 'd')) us.add_where_clause(WhereClause('a', EqualsOperator(), 'x')) self.assertEqual(us.get_context(), {'0': 'b', '1': 'd',", "from cqlengine.statements import UpdateStatement, WhereClause, AssignmentClause from cqlengine.operators import *", "us = UpdateStatement('table') us.add_assignment_clause(AssignmentClause('a', 'b')) us.add_assignment_clause(AssignmentClause('c', 'd')) us.add_where_clause(WhereClause('a', EqualsOperator(), 'x'))", 
"us.add_where_clause(WhereClause('a', EqualsOperator(), 'x')) us.update_context_id(3) self.assertEqual(unicode(us), 'UPDATE table SET \"a\" =", ":2', unicode(us)) def test_context(self): us = UpdateStatement('table') us.add_assignment_clause(AssignmentClause('a', 'b')) us.add_assignment_clause(AssignmentClause('c',", "table SET \"a\" = :4, \"c\" = :5 WHERE \"a\"", "TestCase from cqlengine.statements import UpdateStatement, WhereClause, AssignmentClause from cqlengine.operators import", "WHERE \"a\" = :2', unicode(us)) def test_context(self): us = UpdateStatement('table')", "str(us)) def test_rendering(self): us = UpdateStatement('table') us.add_assignment_clause(AssignmentClause('a', 'b')) us.add_assignment_clause(AssignmentClause('c', 'd'))", "unicode(us)) def test_context(self): us = UpdateStatement('table') us.add_assignment_clause(AssignmentClause('a', 'b')) us.add_assignment_clause(AssignmentClause('c', 'd'))", "def test_context_update(self): us = UpdateStatement('table') us.add_assignment_clause(AssignmentClause('a', 'b')) us.add_assignment_clause(AssignmentClause('c', 'd')) us.add_where_clause(WhereClause('a',", "def test_table_rendering(self): \"\"\" tests that fields are properly added to", "'x')) self.assertEqual(unicode(us), 'UPDATE table SET \"a\" = :0, \"c\" =", "us.add_assignment_clause(AssignmentClause('a', 'b')) us.add_assignment_clause(AssignmentClause('c', 'd')) us.add_where_clause(WhereClause('a', EqualsOperator(), 'x')) us.update_context_id(3) self.assertEqual(unicode(us), 'UPDATE", "tests that fields are properly added to the select statement", "fields are properly added to the select statement \"\"\" us", "us.add_assignment_clause(AssignmentClause('c', 'd')) us.add_where_clause(WhereClause('a', EqualsOperator(), 'x')) self.assertEqual(unicode(us), 'UPDATE table SET \"a\"", "= :2', unicode(us)) def test_context(self): us = UpdateStatement('table') us.add_assignment_clause(AssignmentClause('a', 'b'))", "table SET'), unicode(us)) self.assertTrue(str(us).startswith('UPDATE table SET'), str(us)) def test_rendering(self): us", "self.assertTrue(str(us).startswith('UPDATE table SET'), str(us)) def test_rendering(self): us = UpdateStatement('table') us.add_assignment_clause(AssignmentClause('a',", "'d')) us.add_where_clause(WhereClause('a', EqualsOperator(), 'x')) us.update_context_id(3) self.assertEqual(unicode(us), 'UPDATE table SET \"a\"", "'d', '3': 'x'}) def test_additional_rendering(self): us = UpdateStatement('table', ttl=60) us.add_assignment_clause(AssignmentClause('a',", "the select statement \"\"\" us = UpdateStatement('table') self.assertTrue(unicode(us).startswith('UPDATE table SET'),", "us = UpdateStatement('table', ttl=60) us.add_assignment_clause(AssignmentClause('a', 'b')) us.add_where_clause(WhereClause('a', EqualsOperator(), 'x')) self.assertIn('USING", "table SET \"a\" = :0, \"c\" = :1 WHERE \"a\"", "from unittest import TestCase from cqlengine.statements import UpdateStatement, WhereClause, AssignmentClause", "\"c\" = :1 WHERE \"a\" = :2', unicode(us)) def test_context(self):", "'b')) us.add_assignment_clause(AssignmentClause('c', 'd')) us.add_where_clause(WhereClause('a', EqualsOperator(), 'x')) self.assertEqual(unicode(us), 'UPDATE table SET", "table SET'), str(us)) def test_rendering(self): us = UpdateStatement('table') us.add_assignment_clause(AssignmentClause('a', 'b'))", "test_table_rendering(self): \"\"\" tests that fields are properly added to the", "us.add_assignment_clause(AssignmentClause('a', 'b')) us.add_assignment_clause(AssignmentClause('c', 
'd')) us.add_where_clause(WhereClause('a', EqualsOperator(), 'x')) self.assertEqual(unicode(us), 'UPDATE table", "\"\"\" tests that fields are properly added to the select", "'1': 'd', '2': 'x'}) def test_context_update(self): us = UpdateStatement('table') us.add_assignment_clause(AssignmentClause('a',", "self.assertEqual(unicode(us), 'UPDATE table SET \"a\" = :0, \"c\" = :1", "us.add_where_clause(WhereClause('a', EqualsOperator(), 'x')) self.assertEqual(us.get_context(), {'0': 'b', '1': 'd', '2': 'x'})", "cqlengine.statements import UpdateStatement, WhereClause, AssignmentClause from cqlengine.operators import * class", ":0, \"c\" = :1 WHERE \"a\" = :2', unicode(us)) def", "def test_additional_rendering(self): us = UpdateStatement('table', ttl=60) us.add_assignment_clause(AssignmentClause('a', 'b')) us.add_where_clause(WhereClause('a', EqualsOperator(),", "'x')) us.update_context_id(3) self.assertEqual(unicode(us), 'UPDATE table SET \"a\" = :4, \"c\"", "'3': 'x'}) def test_additional_rendering(self): us = UpdateStatement('table', ttl=60) us.add_assignment_clause(AssignmentClause('a', 'b'))", "added to the select statement \"\"\" us = UpdateStatement('table') self.assertTrue(unicode(us).startswith('UPDATE", "SET'), unicode(us)) self.assertTrue(str(us).startswith('UPDATE table SET'), str(us)) def test_rendering(self): us =", "test_context_update(self): us = UpdateStatement('table') us.add_assignment_clause(AssignmentClause('a', 'b')) us.add_assignment_clause(AssignmentClause('c', 'd')) us.add_where_clause(WhereClause('a', EqualsOperator(),", "SET \"a\" = :0, \"c\" = :1 WHERE \"a\" =", "= :0, \"c\" = :1 WHERE \"a\" = :2', unicode(us))", "SET \"a\" = :4, \"c\" = :5 WHERE \"a\" =", "are properly added to the select statement \"\"\" us =", "'b')) us.add_assignment_clause(AssignmentClause('c', 'd')) us.add_where_clause(WhereClause('a', EqualsOperator(), 'x')) us.update_context_id(3) self.assertEqual(unicode(us), 'UPDATE table", "= :5 WHERE \"a\" = :3') self.assertEqual(us.get_context(), {'4': 'b', '5':", "AssignmentClause from cqlengine.operators import * class UpdateStatementTests(TestCase): def test_table_rendering(self): \"\"\"", "= UpdateStatement('table', ttl=60) us.add_assignment_clause(AssignmentClause('a', 'b')) us.add_where_clause(WhereClause('a', EqualsOperator(), 'x')) self.assertIn('USING TTL", "statement \"\"\" us = UpdateStatement('table') self.assertTrue(unicode(us).startswith('UPDATE table SET'), unicode(us)) self.assertTrue(str(us).startswith('UPDATE", "self.assertEqual(us.get_context(), {'4': 'b', '5': 'd', '3': 'x'}) def test_additional_rendering(self): us", "us.add_assignment_clause(AssignmentClause('a', 'b')) us.add_assignment_clause(AssignmentClause('c', 'd')) us.add_where_clause(WhereClause('a', EqualsOperator(), 'x')) self.assertEqual(us.get_context(), {'0': 'b',", "cqlengine.operators import * class UpdateStatementTests(TestCase): def test_table_rendering(self): \"\"\" tests that", "def test_rendering(self): us = UpdateStatement('table') us.add_assignment_clause(AssignmentClause('a', 'b')) us.add_assignment_clause(AssignmentClause('c', 'd')) us.add_where_clause(WhereClause('a',", "= UpdateStatement('table') us.add_assignment_clause(AssignmentClause('a', 'b')) us.add_assignment_clause(AssignmentClause('c', 'd')) us.add_where_clause(WhereClause('a', EqualsOperator(), 'x')) self.assertEqual(unicode(us),", "EqualsOperator(), 'x')) self.assertEqual(unicode(us), 'UPDATE table SET \"a\" = :0, \"c\"", "us = UpdateStatement('table') self.assertTrue(unicode(us).startswith('UPDATE 
table SET'), unicode(us)) self.assertTrue(str(us).startswith('UPDATE table SET'),", "'d', '2': 'x'}) def test_context_update(self): us = UpdateStatement('table') us.add_assignment_clause(AssignmentClause('a', 'b'))", "\"a\" = :3') self.assertEqual(us.get_context(), {'4': 'b', '5': 'd', '3': 'x'})", "{'4': 'b', '5': 'd', '3': 'x'}) def test_additional_rendering(self): us =", "import TestCase from cqlengine.statements import UpdateStatement, WhereClause, AssignmentClause from cqlengine.operators", "EqualsOperator(), 'x')) self.assertEqual(us.get_context(), {'0': 'b', '1': 'd', '2': 'x'}) def", "WHERE \"a\" = :3') self.assertEqual(us.get_context(), {'4': 'b', '5': 'd', '3':", "unicode(us)) self.assertTrue(str(us).startswith('UPDATE table SET'), str(us)) def test_rendering(self): us = UpdateStatement('table')", "that fields are properly added to the select statement \"\"\"", "test_rendering(self): us = UpdateStatement('table') us.add_assignment_clause(AssignmentClause('a', 'b')) us.add_assignment_clause(AssignmentClause('c', 'd')) us.add_where_clause(WhereClause('a', EqualsOperator(),", "'x')) self.assertEqual(us.get_context(), {'0': 'b', '1': 'd', '2': 'x'}) def test_context_update(self):", "'UPDATE table SET \"a\" = :0, \"c\" = :1 WHERE", "test_additional_rendering(self): us = UpdateStatement('table', ttl=60) us.add_assignment_clause(AssignmentClause('a', 'b')) us.add_where_clause(WhereClause('a', EqualsOperator(), 'x'))", "EqualsOperator(), 'x')) us.update_context_id(3) self.assertEqual(unicode(us), 'UPDATE table SET \"a\" = :4,", "\"c\" = :5 WHERE \"a\" = :3') self.assertEqual(us.get_context(), {'4': 'b',", "<reponame>dokai/cqlengine from unittest import TestCase from cqlengine.statements import UpdateStatement, WhereClause,", "us.update_context_id(3) self.assertEqual(unicode(us), 'UPDATE table SET \"a\" = :4, \"c\" =", "from cqlengine.operators import * class UpdateStatementTests(TestCase): def test_table_rendering(self): \"\"\" tests", "= :3') self.assertEqual(us.get_context(), {'4': 'b', '5': 'd', '3': 'x'}) def" ]
[ "None: self.label = name # reuse else: self.label = label", "self.callback = cb def _handle_event(self, e): if self.enabled and e.inaxes", "not self.ticked self.redraw() if self.callback != None: self.callback(self.name, self.ticked) def", "self.ax.figure.canvas # draw text if len(self.label): self.text = self.ax.text(-0.15, 0.5,", "draw a rectangle, add a bit of spacing self.ax.add_patch(Rectangle((0,(1.0-rsize)/2), rsize,", "rsize=0.6, enabled=True): self.name = name # unique ID associated with", "position is a tuple (x,y,w,h) self.ax.axis('off') self.canvas = self.ax.figure.canvas #", "s = 'checkbox:' + self.name + '=' + str(self.ticked) if", "+ str(self.ticked) if not self.enabled: s += ' (disabled)' return", "(Falcons) # SPDX-License-Identifier: Apache-2.0 #!/usr/bin/python import matplotlib.pyplot as plt from", "fill=True)) # setup event handling self.canvas.mpl_connect('button_release_event', self._handle_event) self.redraw() def __repr__(self):", "# TODO: exclude spacing margin for inaxes calculation self.ticked =", "enabled=True): self.name = name # unique ID associated with #", "event handling self.canvas.mpl_connect('button_release_event', self._handle_event) self.redraw() def __repr__(self): s = 'checkbox:'", "not self.enabled: s += ' (disabled)' return s def on_changed(self,", "if len(self.label): self.text = self.ax.text(-0.15, 0.5, self.label, horizontalalignment='right', verticalalignment='center') #", "'=' + str(self.ticked) if not self.enabled: s += ' (disabled)'", "text if len(self.label): self.text = self.ax.text(-0.15, 0.5, self.label, horizontalalignment='right', verticalalignment='center')", "self.ax = plt.axes(position) # position is a tuple (x,y,w,h) self.ax.axis('off')", "#!/usr/bin/python import matplotlib.pyplot as plt from matplotlib.patches import Rectangle class", "= self.ax.text(-0.15, 0.5, self.label, horizontalalignment='right', verticalalignment='center') # draw a rectangle,", "# position is a tuple (x,y,w,h) self.ax.axis('off') self.canvas = self.ax.figure.canvas", "+ '=' + str(self.ticked) if not self.enabled: s += '", "col = 'grey' if self.enabled: col = ['lightgoldenrodyellow', 'blue'][self.ticked] self.ax.patches[0].set_facecolor(col)", "margin for inaxes calculation self.ticked = not self.ticked self.redraw() if", "else: self.label = label self.callback = None self.enabled = enabled", "to display next to the checkbox if label == None:", "label to display next to the checkbox if label ==", "the checkbox if label == None: self.label = name #", "(x,y,w,h) self.ax.axis('off') self.canvas = self.ax.figure.canvas # draw text if len(self.label):", "len(self.label): self.text = self.ax.text(-0.15, 0.5, self.label, horizontalalignment='right', verticalalignment='center') # draw", "as plt from matplotlib.patches import Rectangle class Checkbox(): def __init__(self,", "for inaxes calculation self.ticked = not self.ticked self.redraw() if self.callback", "rectangle, add a bit of spacing self.ax.add_patch(Rectangle((0,(1.0-rsize)/2), rsize, rsize, fill=True))", "def on_changed(self, cb): self.callback = cb def _handle_event(self, e): if", "enabled self.ticked = default self.ax = plt.axes(position) # position is", "a bit of spacing self.ax.add_patch(Rectangle((0,(1.0-rsize)/2), rsize, rsize, fill=True)) # setup", "None: self.callback(self.name, self.ticked) def redraw(self): col = 'grey' if self.enabled:", "self.ax.axis('off') self.canvas = self.ax.figure.canvas # draw text if len(self.label): self.text", "self.enabled and e.inaxes == self.ax: # TODO: exclude 
spacing margin", "self.ax.text(-0.15, 0.5, self.label, horizontalalignment='right', verticalalignment='center') # draw a rectangle, add", "display next to the checkbox if label == None: self.label", "2020 <NAME> (Falcons) # SPDX-License-Identifier: Apache-2.0 #!/usr/bin/python import matplotlib.pyplot as", "redraw(self): col = 'grey' if self.enabled: col = ['lightgoldenrodyellow', 'blue'][self.ticked]", "class Checkbox(): def __init__(self, name, position, default=False, label=None, rsize=0.6, enabled=True):", "if self.enabled and e.inaxes == self.ax: # TODO: exclude spacing", "is a tuple (x,y,w,h) self.ax.axis('off') self.canvas = self.ax.figure.canvas # draw", "None self.enabled = enabled self.ticked = default self.ax = plt.axes(position)", "self.callback(self.name, self.ticked) def redraw(self): col = 'grey' if self.enabled: col", "<reponame>Falcons-Robocup/code # Copyright 2020 <NAME> (Falcons) # SPDX-License-Identifier: Apache-2.0 #!/usr/bin/python", "import matplotlib.pyplot as plt from matplotlib.patches import Rectangle class Checkbox():", "name # unique ID associated with # label to display", "= cb def _handle_event(self, e): if self.enabled and e.inaxes ==", "' (disabled)' return s def on_changed(self, cb): self.callback = cb", "self.label, horizontalalignment='right', verticalalignment='center') # draw a rectangle, add a bit", "rsize, rsize, fill=True)) # setup event handling self.canvas.mpl_connect('button_release_event', self._handle_event) self.redraw()", "tuple (x,y,w,h) self.ax.axis('off') self.canvas = self.ax.figure.canvas # draw text if", "next to the checkbox if label == None: self.label =", "_handle_event(self, e): if self.enabled and e.inaxes == self.ax: # TODO:", "draw text if len(self.label): self.text = self.ax.text(-0.15, 0.5, self.label, horizontalalignment='right',", "to the checkbox if label == None: self.label = name", "# SPDX-License-Identifier: Apache-2.0 #!/usr/bin/python import matplotlib.pyplot as plt from matplotlib.patches", "= plt.axes(position) # position is a tuple (x,y,w,h) self.ax.axis('off') self.canvas", "name # reuse else: self.label = label self.callback = None", "checkbox if label == None: self.label = name # reuse", "a rectangle, add a bit of spacing self.ax.add_patch(Rectangle((0,(1.0-rsize)/2), rsize, rsize,", "cb): self.callback = cb def _handle_event(self, e): if self.enabled and", "= 'grey' if self.enabled: col = ['lightgoldenrodyellow', 'blue'][self.ticked] self.ax.patches[0].set_facecolor(col) self.ax.figure.canvas.draw()", "self.enabled = enabled self.ticked = default self.ax = plt.axes(position) #", "# draw a rectangle, add a bit of spacing self.ax.add_patch(Rectangle((0,(1.0-rsize)/2),", "<NAME> (Falcons) # SPDX-License-Identifier: Apache-2.0 #!/usr/bin/python import matplotlib.pyplot as plt", "+ self.name + '=' + str(self.ticked) if not self.enabled: s", "self.ax: # TODO: exclude spacing margin for inaxes calculation self.ticked", "self.callback != None: self.callback(self.name, self.ticked) def redraw(self): col = 'grey'", "self.redraw() if self.callback != None: self.callback(self.name, self.ticked) def redraw(self): col", "unique ID associated with # label to display next to", "setup event handling self.canvas.mpl_connect('button_release_event', self._handle_event) self.redraw() def __repr__(self): s =", "= None self.enabled = enabled self.ticked = default self.ax =", "self.label = label self.callback = None self.enabled = enabled self.ticked", "verticalalignment='center') # draw a rectangle, add a bit of spacing", "def 
_handle_event(self, e): if self.enabled and e.inaxes == self.ax: #", "if self.callback != None: self.callback(self.name, self.ticked) def redraw(self): col =", "self.ticked self.redraw() if self.callback != None: self.callback(self.name, self.ticked) def redraw(self):", "Copyright 2020 <NAME> (Falcons) # SPDX-License-Identifier: Apache-2.0 #!/usr/bin/python import matplotlib.pyplot", "= self.ax.figure.canvas # draw text if len(self.label): self.text = self.ax.text(-0.15,", "= enabled self.ticked = default self.ax = plt.axes(position) # position", "label == None: self.label = name # reuse else: self.label", "# label to display next to the checkbox if label", "spacing self.ax.add_patch(Rectangle((0,(1.0-rsize)/2), rsize, rsize, fill=True)) # setup event handling self.canvas.mpl_connect('button_release_event',", "self.name = name # unique ID associated with # label", "import Rectangle class Checkbox(): def __init__(self, name, position, default=False, label=None,", "if not self.enabled: s += ' (disabled)' return s def", "position, default=False, label=None, rsize=0.6, enabled=True): self.name = name # unique", "self.label = name # reuse else: self.label = label self.callback", "label=None, rsize=0.6, enabled=True): self.name = name # unique ID associated", "# reuse else: self.label = label self.callback = None self.enabled", "self._handle_event) self.redraw() def __repr__(self): s = 'checkbox:' + self.name +", "reuse else: self.label = label self.callback = None self.enabled =", "Rectangle class Checkbox(): def __init__(self, name, position, default=False, label=None, rsize=0.6,", "of spacing self.ax.add_patch(Rectangle((0,(1.0-rsize)/2), rsize, rsize, fill=True)) # setup event handling", "handling self.canvas.mpl_connect('button_release_event', self._handle_event) self.redraw() def __repr__(self): s = 'checkbox:' +", "self.ticked = not self.ticked self.redraw() if self.callback != None: self.callback(self.name,", "with # label to display next to the checkbox if", "if label == None: self.label = name # reuse else:", "# draw text if len(self.label): self.text = self.ax.text(-0.15, 0.5, self.label,", "== None: self.label = name # reuse else: self.label =", "self.redraw() def __repr__(self): s = 'checkbox:' + self.name + '='", "cb def _handle_event(self, e): if self.enabled and e.inaxes == self.ax:", "and e.inaxes == self.ax: # TODO: exclude spacing margin for", "'checkbox:' + self.name + '=' + str(self.ticked) if not self.enabled:", "0.5, self.label, horizontalalignment='right', verticalalignment='center') # draw a rectangle, add a", "associated with # label to display next to the checkbox", "def redraw(self): col = 'grey' if self.enabled: col = ['lightgoldenrodyellow',", "plt.axes(position) # position is a tuple (x,y,w,h) self.ax.axis('off') self.canvas =", "self.canvas.mpl_connect('button_release_event', self._handle_event) self.redraw() def __repr__(self): s = 'checkbox:' + self.name", "= name # unique ID associated with # label to", "ID associated with # label to display next to the", "from matplotlib.patches import Rectangle class Checkbox(): def __init__(self, name, position,", "# setup event handling self.canvas.mpl_connect('button_release_event', self._handle_event) self.redraw() def __repr__(self): s", "def __repr__(self): s = 'checkbox:' + self.name + '=' +", "calculation self.ticked = not self.ticked self.redraw() if self.callback != None:", "= 'checkbox:' + self.name + '=' + str(self.ticked) if not", "bit of spacing self.ax.add_patch(Rectangle((0,(1.0-rsize)/2), rsize, rsize, 
fill=True)) # setup event", "# Copyright 2020 <NAME> (Falcons) # SPDX-License-Identifier: Apache-2.0 #!/usr/bin/python import", "self.ticked) def redraw(self): col = 'grey' if self.enabled: col =", "= name # reuse else: self.label = label self.callback =", "e.inaxes == self.ax: # TODO: exclude spacing margin for inaxes", "s += ' (disabled)' return s def on_changed(self, cb): self.callback", "SPDX-License-Identifier: Apache-2.0 #!/usr/bin/python import matplotlib.pyplot as plt from matplotlib.patches import", "= not self.ticked self.redraw() if self.callback != None: self.callback(self.name, self.ticked)", "Checkbox(): def __init__(self, name, position, default=False, label=None, rsize=0.6, enabled=True): self.name", "default self.ax = plt.axes(position) # position is a tuple (x,y,w,h)", "label self.callback = None self.enabled = enabled self.ticked = default", "str(self.ticked) if not self.enabled: s += ' (disabled)' return s", "self.callback = None self.enabled = enabled self.ticked = default self.ax", "self.canvas = self.ax.figure.canvas # draw text if len(self.label): self.text =", "(disabled)' return s def on_changed(self, cb): self.callback = cb def", "self.enabled: s += ' (disabled)' return s def on_changed(self, cb):", "+= ' (disabled)' return s def on_changed(self, cb): self.callback =", "horizontalalignment='right', verticalalignment='center') # draw a rectangle, add a bit of", "self.text = self.ax.text(-0.15, 0.5, self.label, horizontalalignment='right', verticalalignment='center') # draw a", "__repr__(self): s = 'checkbox:' + self.name + '=' + str(self.ticked)", "matplotlib.patches import Rectangle class Checkbox(): def __init__(self, name, position, default=False,", "__init__(self, name, position, default=False, label=None, rsize=0.6, enabled=True): self.name = name", "def __init__(self, name, position, default=False, label=None, rsize=0.6, enabled=True): self.name =", "= label self.callback = None self.enabled = enabled self.ticked =", "# unique ID associated with # label to display next", "add a bit of spacing self.ax.add_patch(Rectangle((0,(1.0-rsize)/2), rsize, rsize, fill=True)) #", "rsize, fill=True)) # setup event handling self.canvas.mpl_connect('button_release_event', self._handle_event) self.redraw() def", "self.ticked = default self.ax = plt.axes(position) # position is a", "default=False, label=None, rsize=0.6, enabled=True): self.name = name # unique ID", "return s def on_changed(self, cb): self.callback = cb def _handle_event(self,", "TODO: exclude spacing margin for inaxes calculation self.ticked = not", "self.ax.add_patch(Rectangle((0,(1.0-rsize)/2), rsize, rsize, fill=True)) # setup event handling self.canvas.mpl_connect('button_release_event', self._handle_event)", "exclude spacing margin for inaxes calculation self.ticked = not self.ticked", "inaxes calculation self.ticked = not self.ticked self.redraw() if self.callback !=", "Apache-2.0 #!/usr/bin/python import matplotlib.pyplot as plt from matplotlib.patches import Rectangle", "self.name + '=' + str(self.ticked) if not self.enabled: s +=", "plt from matplotlib.patches import Rectangle class Checkbox(): def __init__(self, name,", "a tuple (x,y,w,h) self.ax.axis('off') self.canvas = self.ax.figure.canvas # draw text", "== self.ax: # TODO: exclude spacing margin for inaxes calculation", "spacing margin for inaxes calculation self.ticked = not self.ticked self.redraw()", "name, position, default=False, label=None, rsize=0.6, enabled=True): self.name = name #", "on_changed(self, cb): self.callback = cb def 
_handle_event(self, e): if self.enabled", "!= None: self.callback(self.name, self.ticked) def redraw(self): col = 'grey' if", "s def on_changed(self, cb): self.callback = cb def _handle_event(self, e):", "e): if self.enabled and e.inaxes == self.ax: # TODO: exclude", "matplotlib.pyplot as plt from matplotlib.patches import Rectangle class Checkbox(): def", "= default self.ax = plt.axes(position) # position is a tuple" ]
[ "8]. Default is 3') args = p.parse_args() generate(args.size, args.filename) if", "8: print(\"It isn't valid size\") exit(4) generator = Generator(n) data", "-> None: if n < 3 or n > 8:", "def generate(n: int, output_file: str) -> None: if n <", "p.add_argument('filename', type=str, help='Path to output file') p.add_argument('-s', \"--size\", type=int, default=3,", "encoding='utf-8') as f: f.write('\\n'.join(lines)) def main(): p = ArgumentParser() p.add_argument('filename',", "p = ArgumentParser() p.add_argument('filename', type=str, help='Path to output file') p.add_argument('-s',", "SxS field. size must be in [3, 8]. Default is", "Default is 3') args = p.parse_args() generate(args.size, args.filename) if __name__", "n < 3 or n > 8: print(\"It isn't valid", "is 3') args = p.parse_args() generate(args.size, args.filename) if __name__ ==", "3 or n > 8: print(\"It isn't valid size\") exit(4)", "x: ' '.join(map(str, x)), data) with open(output_file, 'w', encoding='utf-8') as", "f: f.write('\\n'.join(lines)) def main(): p = ArgumentParser() p.add_argument('filename', type=str, help='Path", "< 3 or n > 8: print(\"It isn't valid size\")", "help='Generate SxS field. size must be in [3, 8]. Default", "3') args = p.parse_args() generate(args.size, args.filename) if __name__ == '__main__':", "from argparse import ArgumentParser def generate(n: int, output_file: str) ->", "main(): p = ArgumentParser() p.add_argument('filename', type=str, help='Path to output file')", "type=str, help='Path to output file') p.add_argument('-s', \"--size\", type=int, default=3, help='Generate", "size must be in [3, 8]. Default is 3') args", "valid size\") exit(4) generator = Generator(n) data = generator.generate() lines", "lines = map(lambda x: ' '.join(map(str, x)), data) with open(output_file,", "ArgumentParser def generate(n: int, output_file: str) -> None: if n", "as f: f.write('\\n'.join(lines)) def main(): p = ArgumentParser() p.add_argument('filename', type=str,", "open(output_file, 'w', encoding='utf-8') as f: f.write('\\n'.join(lines)) def main(): p =", "to output file') p.add_argument('-s', \"--size\", type=int, default=3, help='Generate SxS field.", "generator.generate() lines = map(lambda x: ' '.join(map(str, x)), data) with", "' '.join(map(str, x)), data) with open(output_file, 'w', encoding='utf-8') as f:", "Generator from argparse import ArgumentParser def generate(n: int, output_file: str)", "hitori_generator import Generator from argparse import ArgumentParser def generate(n: int,", "str) -> None: if n < 3 or n >", "output_file: str) -> None: if n < 3 or n", "x)), data) with open(output_file, 'w', encoding='utf-8') as f: f.write('\\n'.join(lines)) def", "help='Path to output file') p.add_argument('-s', \"--size\", type=int, default=3, help='Generate SxS", "p.add_argument('-s', \"--size\", type=int, default=3, help='Generate SxS field. size must be", "exit(4) generator = Generator(n) data = generator.generate() lines = map(lambda", "= generator.generate() lines = map(lambda x: ' '.join(map(str, x)), data)", "output file') p.add_argument('-s', \"--size\", type=int, default=3, help='Generate SxS field. size", "Generator(n) data = generator.generate() lines = map(lambda x: ' '.join(map(str,", "n > 8: print(\"It isn't valid size\") exit(4) generator =", "f.write('\\n'.join(lines)) def main(): p = ArgumentParser() p.add_argument('filename', type=str, help='Path to", "field. size must be in [3, 8]. 
Default is 3')", "def main(): p = ArgumentParser() p.add_argument('filename', type=str, help='Path to output", "default=3, help='Generate SxS field. size must be in [3, 8].", "= map(lambda x: ' '.join(map(str, x)), data) with open(output_file, 'w',", "import ArgumentParser def generate(n: int, output_file: str) -> None: if", "= ArgumentParser() p.add_argument('filename', type=str, help='Path to output file') p.add_argument('-s', \"--size\",", "type=int, default=3, help='Generate SxS field. size must be in [3,", "if n < 3 or n > 8: print(\"It isn't", "argparse import ArgumentParser def generate(n: int, output_file: str) -> None:", "generator = Generator(n) data = generator.generate() lines = map(lambda x:", "import Generator from argparse import ArgumentParser def generate(n: int, output_file:", "None: if n < 3 or n > 8: print(\"It", "be in [3, 8]. Default is 3') args = p.parse_args()", "> 8: print(\"It isn't valid size\") exit(4) generator = Generator(n)", "'w', encoding='utf-8') as f: f.write('\\n'.join(lines)) def main(): p = ArgumentParser()", "= Generator(n) data = generator.generate() lines = map(lambda x: '", "map(lambda x: ' '.join(map(str, x)), data) with open(output_file, 'w', encoding='utf-8')", "generate(n: int, output_file: str) -> None: if n < 3", "or n > 8: print(\"It isn't valid size\") exit(4) generator", "isn't valid size\") exit(4) generator = Generator(n) data = generator.generate()", "file') p.add_argument('-s', \"--size\", type=int, default=3, help='Generate SxS field. size must", "in [3, 8]. Default is 3') args = p.parse_args() generate(args.size,", "from hitori_generator import Generator from argparse import ArgumentParser def generate(n:", "size\") exit(4) generator = Generator(n) data = generator.generate() lines =", "data) with open(output_file, 'w', encoding='utf-8') as f: f.write('\\n'.join(lines)) def main():", "must be in [3, 8]. Default is 3') args =", "'.join(map(str, x)), data) with open(output_file, 'w', encoding='utf-8') as f: f.write('\\n'.join(lines))", "[3, 8]. Default is 3') args = p.parse_args() generate(args.size, args.filename)", "data = generator.generate() lines = map(lambda x: ' '.join(map(str, x)),", "print(\"It isn't valid size\") exit(4) generator = Generator(n) data =", "with open(output_file, 'w', encoding='utf-8') as f: f.write('\\n'.join(lines)) def main(): p", "int, output_file: str) -> None: if n < 3 or", "\"--size\", type=int, default=3, help='Generate SxS field. size must be in", "args = p.parse_args() generate(args.size, args.filename) if __name__ == '__main__': main()", "ArgumentParser() p.add_argument('filename', type=str, help='Path to output file') p.add_argument('-s', \"--size\", type=int," ]
[ "attribs['span'] for attr_key, attr_val in attribs.items(): xml.set(attr_key, str(attr_val)) for child", "in self.xpath('./indirect_object')]) def has_key(self,key): key = \"%d %d\"%key return len(self.xpath('./indirect_object[@id=\"%s\"]'%key))>0", "dest = \"%d %d\"%r.value dotdata += '\\t\"%s\" -> \"%s\";\\n'%(orig, dest)", "original token layed in the file ''' def _getspan(self): return", "PDFName(PDFString): pass class PDFData(PDFString): pass class PDFBool(PDFString): def from_python(self, value):", "self.getObjectAt(startxref) assert xref.tag in ['xref', 'stream'] and xref[0].tag == 'dictionary'", "iv_bytes + AES.new(key_bytes, mode, iv_bytes).encrypt(data) return encrypted_bytes def decrypt(encrypted_bytes,key_bytes): #assert", "= hashlib.md5(h[:n]).digest() print \"Encryption KEY(%d)\"%i, h.encode('hex') key = h[:n] print", "pld = pld[:-2] pld = decrypt(pld,real_key) e.value=pld #decrypt every string", "i in self.text.split(' ')]) def solve(self): ''' search the referenced", "length n = encrypt_py['Length']/8 print \"N:\",n #a) Pad or truncate", "if type(io) == PDFIndirect and io.isStream() and io.object.isFiltered(): io.object.defilter() def", "\"%d %d\"%key return len(self.xpath('./indirect_object[@id=\"%s\"]'%key))>0 def __getitem__(self, key): if tuple ==", "m.digest() pld = e.value if pld.endswith(\"\\x0d\\x0a\"): pld = pld[:-2] pld", "return self.dictionary.has_key('Type') and self.dictionary['Type'].value == 'ObjStm' def expandObjStm(self): ''' This", "= val val = property(_getval,_setval,None) class PDFDictionary(PDFXML): def to_python(self): return", "leaf tag: %s\"%tag xml = self.parser.makeelement(tag) xml.value=value xml.span=attribs.setdefault('span', (0xffffffff,-1)) del", "null password encryption ''' import hashlib, struct from Crypto.Cipher import", "['indirect_object','dictionary', 'entry', 'array', 'stream', 'xref', 'pdf', 'pdf_update']: return lambda payload,", "len(self.getchildren())==1, \"Wrong number of children in indirect object\" return (self.id,", "not match the actual data size (%d != %d)\"%(str(self.get_numgen()),dictionary['Length'].value,len(self.data.value))) if", "= 16 key_size = 32 def encrypt(plain_text,key_bytes): assert len(key_bytes) ==", "true if this is an object stream (ObjStml) ''' return", "def getRoot(self): ''' Get the pdf Root node of this", "positions = sorted(pointers.keys() + [len(data)]) parsed_objects = [] for p", "structure and replace it with all the new indirect objects.", "compressed at this point\" assert self.dictionary.has_key('N'), \"N is mandatory in", "i): if str == type(i): return self.remove(self.xpath('./entry/name[position()=1 and text()=\"%s\"]/..'%i)[0]) return", "int, 'R must be two numbers, n and g' assert", "value): assert type(value) in [int, float], 'Wrong type for a", "trailer.has_key('ID'): return trailer['ID'] else: return ['',''] def getIndirectObject(self, ref): '''", "parse assert not self.isFiltered(), \"ObjStm should not be compressed at", "def expandAllObjStm(self): ''' Find all object streams and expand them.", "in range(0,len(pointers),2) ]) positions = sorted(pointers.keys() + [len(data)]) parsed_objects =", "type(key): self.xpath('./indirect_object[@obj=\"%s\"]'%key)[0][:]=[val] #mmm else: super(PDFDictionary,self).__setitem__(key,val) def getObjectAt(self, pos): ''' Get", "= h[:n] print \"Encryption KEY\", key.encode('hex') print \"Try to authenticate\"", "of children in indirect object\" return (self.id, self.object.value) def _getobject(self):", "def _setval(self, val): self[1] = val val = 
property(_getval,_setval,None) class", "if self.tag.startswith('indirect'): return self.id else: return self.getparent().get_numgen() #leaf class PDFString(PDFXML):", "type' if type(filters) != list: filters=[filters] params=params and [params] or", "type for a number' self.text = str(value) def to_python(self): x", "begining\" pointers = dict([(pointers[i+1]+first,pointers[i]) for i in range(0,len(pointers),2) ]) positions", "\\ len(dictionary['Filter']) == 0: deletion_list.append((dictionary, 'Filter')) #del dictionary['Filter'] if dictionary.has_key('DecodeParms')", "list: filters=[filters] params=params and [params] or [{}] if params ==", "!= list and (type(params) == dict or params==None ) ]),", "iv_bytes = encrypted_bytes[:block_size] plain_text = AES.new(key_bytes, mode, iv_bytes).decrypt(encrypted_bytes[block_size:]) pad =", "params=params and [params] or [{}] if params == None: params", "\"%d %d\"%key return self.xpath('./indirect_object[@id=\"%s\"]'%key)[0] return super(PDFUpdate,self).__getitem__(key) def __delitem__(self, key): if", "is None, 'Value must be None' self.text = 'null' def", "containing pdf ''' pdf = self.xpath('/*')[0] return pdf.getIndirectObject(self.value) class PDFNumber(PDFXML):", "e in self.xpath('//stream/data'): decrypt_xml(e) for e in self.xpath('//string'): decrypt_xml(e) class", "value): self.set('span',\"%d~%d\"%value) def span_move(self,offset, recursive=True): begin,end = self.span self.span =", "self.text return float(int(float(x))) == float(x) and int(float(x)) or float(x) class", "= PDFStartxref namespace['data'] = PDFData #trees namespace['entry'] = PDFEntry namespace['dictionary']", "pdf Root node of this update. ''' return self[self.getTrailer()['Root'].value].object def", "the pdf Root node of this update. ''' return self[self.getTrailer()['Root'].value].object", "key): assert key.tag == 'name' self[0] = key key =", "if this is an object stream (ObjStml) ''' return self.dictionary.has_key('Type')", "return tuple([int(i) for i in self.get('span').split('~')]) def _setspan(self, value): self.set('span',\"%d~%d\"%value)", "= len(self.data.value) def defilter(self): try: while self.isFiltered(): self.popFilter() except Exception,e:", "nodes_added.add(dest) try: root = \"%d %d\"%self.getRoot() dotdata += '\\t\"trailer\" ->", "\"%d %d\"%self.getRoot() dotdata += '\\t\"trailer\" -> \"%s\";\\n'%root except Exception,e :", "type(io) == PDFIndirect and io.isStream() and io.object.isFiltered(): io.object.defilter() def decrypt(self):", "len(self.data.value): logger.info(\"Length field of object %s does not match the", "be a dictionary.. or null?' assert len(filters) == len(params),'Number of", "property(_getobject,_setobject,None) def _getid(self): return tuple([int(i) for i in self.get('id').split(' ')])", "base64 def rc4crypt(data, key): x = 0 box = range(256)", "ObjStm dictionary\" assert self.dictionary.has_key('First'), \"First is mandatory in ObjStm dictionary\"", "return getattr(super(PDFXML,self),name) def get_numgen(self): ''' Search the object and generation", "being encrypted, pass 4 bytes with the value 0xFFFFFFFF to", "list(key)]) _buf1 = rc4crypt(_buf,_key) print \"RC4 iter(%d) Encrypt data <%s>", ")) #e) append ID ? #TODO, get the ID from", "to_python(self): return self.text.decode('string_escape') class PDFName(PDFString): pass class PDFData(PDFString): pass class", "input to this function. 
m = hashlib.md5() m.update((user_password+pad)[:32]) print \"MD5", "selected_params) for v,i in deletion_list: del v[i] dictionary['Length'].value = len(self.data.value)", "%d\"%(n,g) def to_python(self): return tuple([int(i) for i in self.text.split(' ')])", "\"ObjStm should not be compressed at this point\" assert self.dictionary.has_key('N'),", "from opaflib.filters import defilterData #Logging facility import logging,code logger =", "dotdata += '\\t\"%s\" -> \"%s\";\\n'%(orig, dest) nodes_added.add(orig) nodes_added.add(dest) try: root", "object streams ''' return len(self.xpath('//indirect_object')) def graph(xml_pdf,dot='default.dot'): ''' Generate a", "== None: startxref = self.getStartxref().value xref = self.getObjectAt(startxref) return xref.dictionary", "else: selected_filter = dictionary['Filter'] del dictionary['Filter'] if dictionary.has_key('DecodeParms'): selected_params =", "= range(256) for i in range(256): x = (x +", "startxref = self.getStartxref().value xref = self.getObjectAt(startxref) assert xref.tag in ['xref',", "inside it expanded_iobjects = io_objstm.object.expandObjStm() #replace the object stream by", "function. m = hashlib.md5() m.update((user_password+pad)[:32]) print \"MD5 update 1\", ((user_password+pad)[:32]).encode('hex')", "in data[:first].split()] assert len(pointers)%2 == 0 , \"Wrong number of", "get_numgen(self): ''' Search the object and generation number of any", "names' assert all([type(x)==dict for x in params]), 'Params should be", "#FIX move all this to pdf_update and do the wrapper", "PDFBool(PDFString): def from_python(self, value): assert type(value) == bool, 'Value must", "at certain byte position ''' return self.xpath('//*[starts-with(@span,\"%d~\")]'%pos)[0] def getTrailer(self, startxref=None):", "to the MD5 hash function. m.update (encrypt_py['O'][:32]) print \"MD5 update", "def getTrailer(self, startxref=None): ''' Get the Trailer dictionary (of this", "[stream], obj=(1,0)) array = create_tree('array', [create_leaf('number', i) for i in", "e in self] def getStartxref(self): ''' Get the last startxref", "def graph(xml_pdf,dot='default.dot'): ''' Generate a .dot graph of the pdf", "if trailer.has_key('ID'): return trailer['ID'] else: return ['',''] def getIndirectObject(self, ref):", "generation number (%d)'%g self.text = \"%d %d\"%(n,g) def to_python(self): return", "mode = AES.MODE_CBC pad = block_size - len(plain_text) % block_size", "and text()=\"Type\"]/../name[position()=2 and text()=\"ObjStm\"]/../../..')) def countIObj(self): ''' Count number of", "self.xpath('./%s'%name) return getattr(super(PDFXML,self),name) def get_numgen(self): ''' Search the object and", "span wich indicates where the original token layed in the", "for child in self.getchildren(): child.clear_span() span = property(_getspan,_setspan) def _to_xml(self):", "io_objstm.object.expandObjStm() #replace the object stream by its childs for new_io", "= dictionary['DecodeParms'][0] deletion_list.append((dictionary['DecodeParms'],0)) #del dictionary['DecodeParms'][0] else: selected_filter = dictionary['Filter'] del", "expandAllObjStm(self): ''' Find all object streams and expand them. 
Each", "indicates where the original token layed in the file '''", "only Version 4 supported\" #password length n = encrypt_py['Length']/8 print", "logger.info(\"Couldn't defilter <%s> stream.\"%str(self.get_numgen())) def isObjStm(self): ''' Return true if", "def countObjStm(self): ''' Count number of 'compressed' object streams '''", "PDFEntry namespace['dictionary'] = PDFDictionary namespace['stream'] = PDFStream namespace['pdf'] = PDFPdf", "_buf1 = rc4crypt(_buf,_key) print \"RC4 iter(%d) Encrypt data <%s> with", "found at certain byte position (only in this update!)''' return", "\"digraph {\\n\" nodes_added = set() for io in self.pdf_update.indirect_object: references", "5\", ('\\xff'*4).encode('hex') print \"1rst DIGEST:\", m.digest().encode('hex') h = m.digest()[:n] for", "print \"1rst DIGEST:\", m.digest().encode('hex') h = m.digest()[:n] for i in", "PDFUpdate(PDFXML): def to_python(self): return dict([e.value for e in self.xpath('./indirect_object')]) def", "%s does not match the actual data size (%d !=", "ENCRYPTED!\" encrypt_py = encrypt.value print encrypt_py #Ok try to decrypt", "return self[1] def _setdata(self, data): assert data.tag == 'data' self[1]", "PDFXMLFactory() def create_leaf(tag, value, **kwargs): return PDF.create_leaf(tag, value,**kwargs) def create_tree(tag,", "defilterData .. make it register/unregister able. #(think /Crypt 7.4.10 Crypt", "the object and generation number of any pdf element '''", "== 'dictionary' self[0] = d dictionary = property(_getdictionary,_setdictionary,None) def _getdata(self):", "true if pdf is encrypted ''' return self.getTrailer().has_key('Encrypt') def countObjStm(self):", "**kwargs): return PDF.create_leaf(tag, value,**kwargs) def create_tree(tag, childs, **kwargs): return PDF.create_tree(tag,", "deletion_list.append((dictionary, 'DecodeParms')) #del dictionary['DecodeParms'] if dictionary.has_key('Filter') and \\ type(dictionary['Filter']) ==", "x in params]), 'Params should be a dictionary.. or null?'", "-> \"%s\";\\n'%root except Exception,e : pass dotdata += '}\\n' logger.info(\"Writing", "attribs.items(): xml.set(attr_key, str(attr_val)) return xml #Tree def create_tree(self, tag, *childs,", "'xref', 'pdf', 'pdf_update'], \"Got wrong tree tag: %s\"%tag xml =", "pad = ord(plain_text[-1]) return plain_text[:-pad] assert self.isEncrypted() #Get and print", "== list and (type(params) == list or params==None ), type(filters)", "== 4, \"Sorry only Version 4 supported\" assert encrypt_py['R'] ==", "and replace it with all the new indirect objects. '''", "for i in range(0,50): h = hashlib.md5(h[:n]).digest() print \"Encryption KEY(%d)\"%i,", "#FIX recode defilterData .. make it register/unregister able. 
#(think /Crypt", "decrypt_xml(e) class PDFUpdate(PDFXML): def to_python(self): return dict([e.value for e in", "assert tag in ['indirect_object','dictionary', 'entry', 'array', 'stream', 'xref', 'pdf', 'pdf_update'],", "selected_filter = dictionary['Filter'] del dictionary['Filter'] if dictionary.has_key('DecodeParms'): selected_params = dictionary['DecodeParms']", "def expandAllObjStm(self): for ref in self.findAllObjStm(): self.expandObjStm(ref) #Factory class PDFXMLFactory():", "= value.encode('string_escape') def to_python(self): return self.text.decode('string_escape') class PDFName(PDFString): pass class", "''' return self.dictionary.has_key('Filter') def getFilters(self): val = self.dictionary.value filters =", "self.create_leaf(tag, payload, **my_kwargs) elif tag in ['indirect_object','dictionary', 'entry', 'array', 'stream',", "def to_python(self): return {'dictionary':self[0].value, 'data':self[1].value} def _getdictionary(self): return self[0] def", "def to_python(self): return int(self.text.decode('string_escape')) class PDFHeader(PDFString): pass #tree class PDFEntry(PDFXML):", "assert encrypt_py['R'] == 4, \"Sorry only Version 4 supported\" #password", "= PDFUpdate namespace['indirect_object'] = PDFIndirect namespace['array'] = PDFArray self.parser.set_element_class_lookup(lookup) #leaf", "defilter(self): try: while self.isFiltered(): self.popFilter() except Exception,e: logger.debug(\"Couldn't defilter <%s>", "%s(a dot file). Download graphviz or try this http://rise4fun.com/Agl for", "if stream is filtered ''' return self.dictionary.has_key('Filter') def getFilters(self): val", "def _setspan(self, value): self.set('span',\"%d~%d\"%value) def span_move(self,offset, recursive=True): begin,end = self.span", "a span wich indicates where the original token layed in", "self.span = (begin+offset,end+offset) if recursive: for child in self.getchildren(): child.span_move(offset)", "def getFilters(self): val = self.dictionary.value filters = val.get('Filter',None) params =", "http://rise4fun.com/Agl for render it.\"%dot) file(dot,\"w\").write(dotdata) def expandAllObjStm(self): ''' Find all", "#assert len(key_bytes) == key_size mode = AES.MODE_CBC iv_bytes = encrypted_bytes[:block_size]", "authenticate\" _buf = hashlib.md5(pad + ID).digest() print \"MD5(padding+ID):\",_buf.encode('hex') for i", "the object found at certain byte position (only in this", "and g < 65535 , 'Invalid generation number (%d)'%g self.text", "= self.getTrailer()['Encrypt'].solve().object print \"It's ENCRYPTED!\" encrypt_py = encrypt.value print encrypt_py", "''' This will try to decrypt V:4 null password encryption", "PDFNumber namespace['null'] = PDFNull namespace['bool'] = PDFBool namespace['R'] = PDFR", "dictionary.has_key('DecodeParms'): selected_params = dictionary['DecodeParms'] deletion_list.append((dictionary, 'DecodeParms')) #del dictionary['DecodeParms'] if dictionary.has_key('Filter')", "0 and g < 65535 , 'Invalid generation number (%d)'%g", "attr_val in attribs.items(): xml.set(attr_key, str(attr_val)) for child in childs: xml.append(child)", "str(value) def to_python(self): x = self.text return float(int(float(x))) == float(x)", "def defilterAll(self): ''' Find all object streams and expand them.", "e in self.getchildren()]) def has_key(self,key): return len(self.xpath('./entry/name[position()=1 and text()=\"%s\"]'%key))>0 def", "def rc4crypt(data, key): x = 0 box = range(256) for", "KEY\", key.encode('hex') print \"Try to authenticate\" _buf = hashlib.md5(pad +", "len(references) == 0: dotdata 
+= '\\t\"%s\";\\n'%x nodes_added.add(orig) else: for r", "\"Wrong number of children in indirect object\" return (self.id, self.object.value)", "children in indirect object\" return (self.id, self.object.value) def _getobject(self): return", "= self.span self.span = (min(begin,span[0]),max(end,span[1])) def clear_span(self, recursive=True): del self.attrib['span']", "return super(PDFDictionary,self).__delitem__(i) def __setitem__(self, key, val): if str == type(key):", "attr_key, attr_val in attribs.items(): xml.set(attr_key, str(attr_val)) return xml #Tree def", "''' Search 'compressed' object streams ids/refs''' return [io.id for io", "def decrypt_xml(xml_element): n,g = xml_element.get_numgen() m = hashlib.md5() m.update(key) m.update(chr(n&0xff))", "Every pdf token xml representation will have a span wich", "plain_text[:-pad] assert self.isEncrypted() #Get and print the encryption dictionary encrypt", "to_python(self): x = self.text return float(int(float(x))) == float(x) and int(float(x))", "number of 'compressed' object streams ''' return len(self.xpath('//stream/dictionary/entry/name[position()=1 and text()=\"Type\"]/../name[position()=2", "graph(xml_pdf,dot='default.dot'): ''' Generate a .dot graph of the pdf '''", "2\", (encrypt_py['O'][:32]).encode('hex') #d) Convert the integer value of the P", "popFilter(self): dictionary = self.dictionary assert dictionary.has_key('Filter'), 'Stream not Filtered!' selected_filter", "len(self.xpath('.//stream/dictionary/entry/name[position()=1 and text()=\"Type\"]/../name[position()=2 and text()=\"ObjStm\"]/../../..')) def expandObjStm(self, ref): io_objstm =", "def _to_xml(self): return etree.tostring(self) xml = property(_to_xml) def _from_python(self, value):", "% 256])) return ''.join(out) block_size = 16 key_size = 32", "ref): io_objstm = self[ref] assert io_objstm.object.dictionary['Type'].value == 'ObjStm' #completelly defilter", "\"%d %d\"%(n,g) def to_python(self): return tuple([int(i) for i in self.text.split('", "def isStream(self): return len(self.xpath('./stream'))==1 class PDFPdf(PDFXML): def to_python(self): return [e.value", "= create_tree('dictionary',[entry]) stream_data = create_leaf('data',\"A\"*100) stream = create_tree('stream',[dictionary,stream_data]) indirect =", "_getid(self): return tuple([int(i) for i in self.get('id').split(' ')]) def _setid(self,", "[] for p in range(0,len(positions)-1): logger.info(\"Adding new object %s from", "pointer (should be at least one) ''' return self.pdf_update[-1].startxref[-1] #FIX", "create_leaf(self, tag, value,**attribs): assert tag in ['number','string','name','R','startxref','header','data','null','bool'], \"Got wrong leaf", "== type(i): return self.remove(self.xpath('./entry/name[position()=1 and text()=\"%s\"]/..'%i)[0]) return super(PDFDictionary,self).__delitem__(i) def __setitem__(self,", "create_tree(self, tag, *childs, **attribs): assert tag in ['indirect_object','dictionary', 'entry', 'array',", "in ['number','string','name','R','startxref','header','data','null','bool']: return lambda payload, **my_kwargs: self.create_leaf(tag, payload, **my_kwargs) elif", "in self.pdf_update: if u.has_key(ref): return u[ref] def getRoot(self): ''' Get", "to_python(self): return {'false': False, 'true': True}[self.text] class PDFNull(PDFString): def from_python(self,", "'true': True}[self.text] class PDFNull(PDFString): def from_python(self, value): assert value is", "self.get('id').split(' ')]) def _setid(self, o): self.set('id', \"%d %d\"%o) id =", "assert self.text == 'null', 'PDFNull 
xml not initialized' return None", "import hashlib, struct from Crypto.Cipher import AES from Crypto.Util import", "Crypto.Util import randpool import base64 def rc4crypt(data, key): x =", "4 supported\" #password length n = encrypt_py['Length']/8 print \"N:\",n #a)", "''' This parses the ObjStm structure and replace it with", "to authenticate\" _buf = hashlib.md5(pad + ID).digest() print \"MD5(padding+ID):\",_buf.encode('hex') for", "Trailer dictionary (should be at least one) ''' if startxref", "tuple == type(key): key = \"%d %d\"%key return self.remove(self.xpath('./indirect_object[@id=\"%s\"]'%key)[0]) return", "u.findAllObjStm(): u.expandObjStm(ref) def defilterAll(self): ''' Find all object streams and", "= PDFBool namespace['R'] = PDFR namespace['header'] = PDFHeader namespace['startxref'] =", "in ObjStm dictionary\" assert self.dictionary.has_key('First'), \"First is mandatory in ObjStm", "= io.xpath(\".//R\") orig = \"%d %d\"%io.id if len(references) == 0:", "= str(value) def to_python(self): x = self.text return float(int(float(x))) ==", "list or params==None ), type(filters) != list and (type(params) ==", "dictionary encrypt = self.getTrailer()['Encrypt'].solve().object print \"It's ENCRYPTED!\" encrypt_py = encrypt.value", "return len(self.xpath('./indirect_object[@id=\"%s\"]'%key))>0 def __getitem__(self, key): if tuple == type(key): key", "'Value must be a boolean' self.text = ['false','true'][int(value)] def to_python(self):", "tree tag: %s\"%tag xml = self.parser.makeelement(tag) xml.span=attribs.setdefault('span', (0xffffffff,-1)) del attribs['span']", "nodes_added.add(orig) else: for r in references: dest = \"%d %d\"%r.value", "wich indicates where the original token layed in the file", "self.xpath('.//stream/dictionary/entry/name[position()=1 and text()=\"Type\"]/../name[position()=2 and text()=\"ObjStm\"]/../../../..')] def expandAllObjStm(self): for ref in", "m.update (encrypt_py['O'][:32]) print \"MD5 update 2\", (encrypt_py['O'][:32]).encode('hex') #d) Convert the", ") ]), 'Filter/DecodeParms wrong type' if type(filters) != list: filters=[filters]", "graph of the pdf ''' dotdata = \"digraph {\\n\" nodes_added", "p in range(0,len(positions)-1): logger.info(\"Adding new object %s from objectstream\"%repr((pointers[positions[p]],0))) io", "''' from opaflib.parser import parse assert not self.isFiltered(), \"ObjStm should", "print encrypt_py #Ok try to decrypt it ... 
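# --- Added sketch (editor note, not original opaflib code) ----------------
# PDFNumber.to_python above uses the pre-2.5 "cond and a or b" ternary idiom.
# The helper below is an equivalent, explicit rewrite; note the and/or form
# silently yields 0.0 instead of 0 for the value zero, because "0 or 0.0"
# evaluates to 0.0.
def _coerce_number_sketch(text):
    f = float(text)
    if int(f) == f:
        return int(f)   # integral-valued numbers come back as int
    return f            # everything else stays float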
#tree
class PDFEntry(PDFXML):
    def to_python(self):
        return tuple([e.value for e in self.getchildren()])
    def _getkey(self):
        return self[0]
    def _setkey(self, key):
        assert key.tag == 'name'
        self[0] = key
    key = property(_getkey, _setkey, None)
    def _getval(self):
        return self[1]
    def _setval(self, val):
        self[1] = val
    val = property(_getval, _setval, None)

class PDFDictionary(PDFXML):
    def to_python(self):
        return dict([e.value for e in self])
    def has_key(self, key):
        return len(self.xpath('./entry/name[position()=1 and text()="%s"]'%key)) > 0
    def __getitem__(self, i):
        if str == type(i):
            return self.xpath('./entry/name[position()=1 and text()="%s"]/../*[position()=2]'%i)[0]
        return super(PDFDictionary, self).__getitem__(i)
    def __delitem__(self, i):
        if str == type(i):
            return self.remove(self.xpath('./entry/name[position()=1 and text()="%s"]/..'%i)[0])
        return super(PDFDictionary, self).__delitem__(i)
    def __setitem__(self, key, val):
        if str == type(key):
            self.xpath('./entry/name[position()=1 and text()="%s"]/..'%key)[0].val = val
        else:
            super(PDFDictionary, self).__setitem__(key, val)

class PDFStream(PDFXML):
    def to_python(self):
        return {'dictionary': self[0].value, 'data': self[1].value}
    def _getdictionary(self):
        return self[0]
    def _setdictionary(self, d):
        assert d.tag == 'dictionary'   #fixed: assertion checked the wrong variable
        self[0] = d
    dictionary = property(_getdictionary, _setdictionary, None)
    def _getdata(self):
        return self[1]
    def _setdata(self, data):
        assert data.tag == 'data'
        self[1] = data
    data = property(_getdata, _setdata, None)

    def isFiltered(self):
        ''' Check if stream is filtered '''
        return self.dictionary.has_key('Filter')

    def getFilters(self):
        val = self.dictionary.value
        filters = val.get('Filter', None)
        params = val.get('DecodeParms', None)   #fixed: key was misspelled 'DecodeParams'
        assert any([type(filters) == list and (type(params) == list or params == None),
                    type(filters) != list and (type(params) == dict or params == None)]), \
               'Filter/DecodeParms wrong type'
        if type(filters) != list:
            filters = [filters]
            params = params and [params] or [{}]
        if params == None:
            params = [{}]*len(filters)
        assert all([type(x) == str for x in filters]), 'Filters shall be names'
        assert all([type(x) == dict for x in params]), 'Params should be a dictionary.. or null?'
        assert len(filters) == len(params), 'Number of DecodeParms should match Filters'
        return zip(filters, params)

    def popFilter(self):
        ''' Strip the outermost filter and defilter the stream data '''
        dictionary = self.dictionary
        assert dictionary.has_key('Filter'), 'Stream not Filtered!'
        selected_filter = None
        selected_params = None
        deletion_list = []
        if dictionary['Length'].value != len(self.data.value):
            logger.info("Length field of object %s does not match the actual data size (%d != %d)"%(
                        str(self.get_numgen()), dictionary['Length'].value, len(self.data.value)))
        if type(dictionary['Filter']) == PDFArray:
            selected_filter = dictionary['Filter'][0]
            del dictionary['Filter'][0]
            if dictionary.has_key('DecodeParms'):
                assert type(dictionary['DecodeParms']) == PDFArray, \
                       'Array of filters need array of decoding params'
                selected_params = dictionary['DecodeParms'][0]
                deletion_list.append((dictionary['DecodeParms'], 0))
        else:
            selected_filter = dictionary['Filter']
            del dictionary['Filter']
            if dictionary.has_key('DecodeParms'):
                selected_params = dictionary['DecodeParms']
                deletion_list.append((dictionary, 'DecodeParms'))
        #clean up now-empty Filter/DecodeParms arrays
        if dictionary.has_key('Filter') and \
           type(dictionary['Filter']) == PDFArray and \
           len(dictionary['Filter']) == 0:
            deletion_list.append((dictionary, 'Filter'))
        if dictionary.has_key('DecodeParms') and \
           type(dictionary['DecodeParms']) == PDFArray and \
           len(dictionary['DecodeParms']) == 0:
            deletion_list.append((dictionary, 'DecodeParms'))
        #FIX recode defilterData .. make it register/unregister able.
        #(think /Crypt 7.4.10 Crypt Filter )
        self.data.value = defilterData(selected_filter.value, self.data.value,
                                       selected_params and selected_params.value or selected_params)
        for v, i in deletion_list:
            del v[i]
        dictionary['Length'].value = len(self.data.value)

    def defilter(self):
        try:
            while self.isFiltered():
                self.popFilter()
        except Exception, e:
            logger.debug("Couldn't defilter <%s> stream (exception %s)."%(self.value, str(e)))
            logger.info("Couldn't defilter <%s> stream."%str(self.get_numgen()))

    def isObjStm(self):
        ''' Return true if this is an object stream (ObjStm) '''
        return self.dictionary.has_key('Type') and self.dictionary['Type'].value == 'ObjStm'

    def expandObjStm(self):
        ''' This parses the ObjStm structure and replaces it with all the
            new indirect objects. '''
        from opaflib.parser import parse
        assert not self.isFiltered(), "ObjStm should not be compressed at this point"
        assert self.dictionary.has_key('N'), "N is mandatory in ObjStm dictionary"
        assert self.dictionary.has_key('First'), "First is mandatory in ObjStm dictionary"
        dictionary = self.dictionary
        data = self.data.value
        first = dictionary["First"].value
        pointers = [int(x) for x in data[:first].split()]
        assert len(pointers)%2 == 0, "Wrong number of integers in the ObjStm beginning"
        pointers = dict([(pointers[i+1]+first, pointers[i]) for i in range(0, len(pointers), 2)])
        positions = sorted(pointers.keys() + [len(data)])
        parsed_objects = []
        for p in range(0, len(positions)-1):
            logger.info("Adding new object %s from objectstream"%repr((pointers[positions[p]], 0)))
            io = PDF.indirect_object(parse('object', data[positions[p]:positions[p+1]]+" "))
            io.id = (pointers[positions[p]], 0)
            parsed_objects.append(io)
        return parsed_objects

class PDFArray(PDFXML):
    def to_python(self):
        return [e.value for e in self]

class PDFIndirect(PDFXML):
    def to_python(self):
        assert len(self.getchildren()) == 1, "Wrong number of children in indirect object"
        return (self.id, self.object.value)
    def _getobject(self):
        return self[0]
    def _setobject(self, o):
        self[0] = o
    object = property(_getobject, _setobject, None)
    def _getid(self):
        return tuple([int(i) for i in self.get('id').split(' ')])
    def _setid(self, o):
        self.set('id', "%d %d"%o)
    id = property(_getid, _setid, None)
    def isStream(self):
        return len(self.xpath('./stream')) == 1
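# --- Added sketch (editor note, not original opaflib code) ----------------
# popFilter above delegates the actual decoding to opaflib.filters.defilterData.
# As a rough illustration of what that does for the most common filter,
# /FlateDecode, here is a minimal stand-in built on zlib; real DecodeParms
# handling (PNG predictors, /Columns, etc.) is deliberately omitted.
import zlib as _zlib
def _flatedecode_sketch(data, params=None):
    # params would carry /Predictor, /Columns, ... for predictor post-processing
    return _zlib.decompress(data)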
class PDFPdf(PDFXML):
    def to_python(self):
        return [e.value for e in self]

    def getStartxref(self):
        ''' Get the last startxref pointer (should be at least one) '''
        return self.pdf_update[-1].startxref[-1]

    #FIX move all this to pdf_update and do the wrapper here
    def getObjectAt(self, pos):
        ''' Get the object found at certain byte position '''
        return self.xpath('//*[starts-with(@span,"%d~")]'%pos)[0]

    def getTrailer(self, startxref=None):
        ''' Get the Trailer dictionary (should be at least one) '''
        if startxref == None:
            startxref = self.getStartxref().value
        xref = self.getObjectAt(startxref)
        assert xref.tag in ['xref', 'stream'] and xref[0].tag == 'dictionary'
        return xref[0]

    def getID(self, startxref=None):
        ''' Get the pdf ID from the trailer dictionary '''
        trailer = self.getTrailer(startxref).value
        if trailer.has_key('ID'):
            return trailer['ID']
        else:
            return ['', '']

    def getIndirectObject(self, ref):
        ''' Search for an indirect object '''
        for u in self.pdf_update:
            if u.has_key(ref):
                return u[ref]

    def getRoot(self):
        ''' Get the pdf Root node. '''
        return self.getIndirectObject(self.getTrailer()['Root'].value).object

    def isEncrypted(self):
        ''' Return true if pdf is encrypted '''
        return self.getTrailer().has_key('Encrypt')

    def countObjStm(self):
        ''' Count number of 'compressed' object streams '''
        return len(self.xpath('//stream/dictionary/entry/name[position()=1 and text()="Type"]/../name[position()=2 and text()="ObjStm"]/../../..'))

    def countIObj(self):
        ''' Count number of indirect objects '''
        return len(self.xpath('//indirect_object'))

    def graph(self, dot='default.dot'):
        ''' Generate a .dot graph of the pdf '''
        dotdata = "digraph {\n"
        nodes_added = set()
        for io in self.xpath('./pdf_update/indirect_object'):
            references = io.xpath(".//R")
            orig = "%d %d"%io.id
            if len(references) == 0:
                dotdata += '\t"%s";\n'%orig   #fixed: referenced an undefined name
                nodes_added.add(orig)
            else:
                for r in references:
                    dest = "%d %d"%r.value
                    dotdata += '\t"%s" -> "%s";\n'%(orig, dest)
                    nodes_added.add(orig)
                    nodes_added.add(dest)
        try:
            root = "%d %d"%self.getRoot()
            dotdata += '\t"trailer" -> "%s";\n'%root
        except Exception, e:
            pass
        dotdata += '}\n'
        logger.info("Writing graph to %s (a dot file). Download graphviz or try http://rise4fun.com/Agl to render it."%dot)
        file(dot, "w").write(dotdata)

    def expandAllObjStm(self):
        ''' Find all object streams and expand them. Each ObjStm will be
            replaced by its children '''
        for u in self.pdf_update:
            for ref in u.findAllObjStm():
                u.expandObjStm(ref)

    def defilterAll(self):
        ''' Defilter every filtered indirect stream in every update '''
        for u in self.pdf_update:
            for io in u[:]:
                if type(io) == PDFIndirect and io.isStream() and io.object.isFiltered():
                    io.object.defilter()

    def decrypt(self):
        ''' This will try to decrypt V:4 null password encryption '''
        import hashlib, struct
        from Crypto.Cipher import AES
        from Crypto.Util import randpool

        def rc4crypt(data, key):
            x = 0
            box = range(256)
            for i in range(256):
                x = (x + box[i] + ord(key[i % len(key)])) % 256
                box[i], box[x] = box[x], box[i]
            x = 0
            y = 0
            out = []
            for char in data:
                x = (x + 1) % 256
                y = (y + box[x]) % 256
                box[x], box[y] = box[y], box[x]
                out.append(chr(ord(char) ^ box[(box[x] + box[y]) % 256]))
            return ''.join(out)

        block_size = 16
        key_size = 32

        def encrypt(plain_text, key_bytes):
            assert len(key_bytes) == key_size
            mode = AES.MODE_CBC
            pad = block_size - len(plain_text) % block_size
            data = plain_text + pad * chr(pad)
            iv_bytes = randpool.RandomPool(512).get_bytes(block_size)
            return iv_bytes + AES.new(key_bytes, mode, iv_bytes).encrypt(data)

        def decrypt(encrypted_bytes, key_bytes):
            #assert len(key_bytes) == key_size
            mode = AES.MODE_CBC
            iv_bytes = encrypted_bytes[:block_size]
            plain_text = AES.new(key_bytes, mode, iv_bytes).decrypt(encrypted_bytes[block_size:])
            pad = ord(plain_text[-1])
            return plain_text[:-pad]

        assert self.isEncrypted()
        #Get and print the encryption dictionary
        #(renamed from 'encrypt' to avoid shadowing the helper above)
        encrypt_dict = self.getTrailer()['Encrypt'].solve().object
        print "It's ENCRYPTED!"
        encrypt_py = encrypt_dict.value
        print encrypt_py
        #Ok try to decrypt it ...
        assert encrypt_py['V'] == 4, "Sorry only Version 4 supported"
        assert encrypt_py['R'] == 4, "Sorry only Revision 4 supported"
        #key length in bytes
        n = encrypt_py['Length']/8
        print "N:", n
        #a) Pad or truncate the password string to exactly 32 bytes
        #   (the constant below is the standard PDF padding string).
        user_password = ""
        pad = "28BF4E5E4E758A4164004E56FFFA01082E2E00B6D0683E802F0CA9FE6453697A".decode('hex')
        print "PASSWORD: ", user_password.encode('hex')
        print "PAD: ", pad.encode('hex')
        #b) Initialize the MD5 hash function and pass the result of step (a)
        #   as input to this function.
        m = hashlib.md5()
        m.update((user_password + pad)[:32])
        print "MD5 update 1", ((user_password + pad)[:32]).encode('hex')
        #c) Pass the value of the encryption dictionary's O entry to the
        #   MD5 hash function.
        m.update(encrypt_py['O'][:32])
        print "MD5 update 2", (encrypt_py['O'][:32]).encode('hex')
        #d) Convert the integer value of the P entry to a 32-bit unsigned
        #   binary number and pass these bytes to the MD5 hash function,
        #   low-order byte first. WTF!!??
        print "MD5 update 3", struct.pack("<L", 0xffffffff & encrypt_py['P']).encode('hex')
        m.update(struct.pack("<L", 0xffffffff & encrypt_py['P']))
        #e) append ID ?
        #TODO, get the ID from the trailer..
        ID = ''
        m.update(ID)
        print "MD5 update 4", ID.encode('hex')
        #f) If document metadata is not being encrypted, pass 4 bytes with
        #   the value 0xFFFFFFFF to the MD5 hash function.
        if encrypt_py.has_key('EncryptMetadata') and encrypt_py['EncryptMetadata'] == False:
            m.update('\xff'*4)
            print "MD5 update 5", ('\xff'*4).encode('hex')
        print "1st DIGEST:", m.digest().encode('hex')
        #g/h) Rehash the first n bytes of the digest 50 times (revision >= 3).
        h = m.digest()[:n]
        for i in range(0, 50):
            h = hashlib.md5(h[:n]).digest()
            print "Encryption KEY(%d)"%i, h.encode('hex')
        key = h[:n]
        print "Encryption KEY", key.encode('hex')

        print "Try to authenticate"
        _buf = hashlib.md5(pad + ID).digest()
        print "MD5(padding+ID):", _buf.encode('hex')
        for i in range(0, 20):
            _key = ''.join([chr(ord(k)^i) for k in list(key)])
            _buf1 = rc4crypt(_buf, _key)
            print "RC4 iter(%d) Encrypt data <%s> with key <%s> and it gives data <%s>"%(
                  i, _buf.encode('hex'), _key.encode('hex'), _buf1.encode('hex'))
            _buf = _buf1
        assert _buf == encrypt_py['U'][:16]
        print "Authenticated! (An actual pass is not needed. Using null pass '' )"
        print "U", encrypt_py['U'].encode('hex')
        print "O", encrypt_py['O'].encode('hex')

        def decrypt_xml(xml_element):
            n, g = xml_element.get_numgen()
            m = hashlib.md5()
            m.update(key)
            m.update(chr(n & 0xff))
            m.update(chr((n >> 8) & 0xff))
            m.update(chr((n >> 16) & 0xff))
            m.update(chr(g & 0xff))
            m.update(chr((g >> 8) & 0xff))
            m.update("sAlT")
            real_key = m.digest()
            pld = xml_element.value   #fixed: read the loop variable from the enclosing scope
            if pld.endswith("\x0d\x0a"):
                pld = pld[:-2]
            xml_element.value = decrypt(pld, real_key)

        #decrypt every string and stream in place...
        for e in self.xpath('//stream/data'):
            decrypt_xml(e)
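# --- Added sketch (editor note, not original opaflib code) ----------------
# decrypt_xml above derives the per-object key the way Algorithm 3.1 of the
# PDF spec describes for /AESV2: file key + 3 low-order bytes of the object
# number + 2 low-order bytes of the generation number + the constant "sAlT",
# hashed with MD5 and truncated to min(len(key)+5, 16) bytes.
import hashlib as _hashlib
def _object_key_sketch(file_key, num, gen):
    m = _hashlib.md5()
    m.update(file_key)
    m.update(chr(num & 0xff) + chr((num >> 8) & 0xff) + chr((num >> 16) & 0xff))
    m.update(chr(gen & 0xff) + chr((gen >> 8) & 0xff))
    m.update("sAlT")  # only appended for the AES (/AESV2) crypt filter
    return m.digest()[:min(len(file_key) + 5, 16)]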
class PDFUpdate(PDFXML):
    def to_python(self):
        return dict([e.value for e in self.xpath('./indirect_object')])
    def has_key(self, key):
        key = "%d %d"%key
        return len(self.xpath('./indirect_object[@id="%s"]'%key)) > 0
    def __getitem__(self, key):
        if tuple == type(key):
            key = "%d %d"%key
            return self.xpath('./indirect_object[@id="%s"]'%key)[0]
        return super(PDFUpdate, self).__getitem__(key)
    def __delitem__(self, key):
        if tuple == type(key):
            key = "%d %d"%key
            return self.remove(self.xpath('./indirect_object[@id="%s"]'%key)[0])
        return super(PDFUpdate, self).__delitem__(key)
    def __setitem__(self, key, val):
        if str == type(key):
            self.xpath('./indirect_object[@obj="%s"]'%key)[0][:] = [val]
        else:
            super(PDFUpdate, self).__setitem__(key, val)   #fixed: called the wrong super class

    def getStartxref(self):
        ''' Get the last startxref pointer of this update (reconstructed
            helper; getTrailer below depends on it) '''
        return self.startxref[-1]

    def getObjectAt(self, pos):
        ''' Get the object found at certain byte position (only in this update!) '''
        return self.xpath('.//*[starts-with(@span,"%d~")]'%pos)[0]

    def getTrailer(self, startxref=None):
        ''' Get the Trailer dictionary (of this update!) '''
        if startxref == None:
            startxref = self.getStartxref().value
        xref = self.getObjectAt(startxref)
        return xref.dictionary

    def getRoot(self):
        ''' Get the pdf Root node of this update. '''
        return self[self.getTrailer()['Root'].value].object

    def countObjStm(self):
        ''' Count number of 'compressed' object streams '''
        return len(self.xpath('.//stream/dictionary/entry/name[position()=1 and text()="Type"]/../name[position()=2 and text()="ObjStm"]/../../..'))

    def expandObjStm(self, ref):
        io_objstm = self[ref]
        assert io_objstm.object.dictionary['Type'].value == 'ObjStm'
        #completely defilter the object stream
        while io_objstm.object.isFiltered():
            io_objstm.object.popFilter()
        #parse the indirect simple objects inside it
        expanded_iobjects = io_objstm.object.expandObjStm()
        #replace the object stream by its children
        for new_io in expanded_iobjects:
            io_objstm.addnext(new_io)
        self.remove(io_objstm)

    def findAllObjStm(self):
        ''' Search 'compressed' object streams ids/refs '''
        return [io.id for io in self.xpath('.//stream/dictionary/entry/name[position()=1 and text()="Type"]/../name[position()=2 and text()="ObjStm"]/../../../..')]

    def expandAllObjStm(self):
        for ref in self.findAllObjStm():
            self.expandObjStm(ref)
"self.text = str(value).encode('string_escape') def to_python(self): return int(self.text.decode('string_escape')) class PDFHeader(PDFString): pass", "PDFPdf(PDFXML): def to_python(self): return [e.value for e in self] def", "decrypt V:4 null password encryption ''' import hashlib, struct from", "dictionary.has_key('Filter') and \\ type(dictionary['Filter']) == PDFArray and \\ len(dictionary['Filter']) ==", "for e in self] class PDFIndirect(PDFXML): def to_python(self): assert len(self.getchildren())==1,", "from objectstream\"%repr((pointers[positions[p]],0))) io = PDF.indirect_object(parse('object', data[positions[p]:positions[p+1]]+\" \")) io.id = (pointers[positions[p]],0)", "\", pad.encode('hex') #b) Initialize the MD5 hash function and pass", "actual data size (%d != %d)\"%(str(self.get_numgen()),dictionary['Length'].value,len(self.data.value))) if type(dictionary['Filter']) == PDFArray:", "If document metadata is not being encrypted, pass 4 bytes", "float(int(float(x))) == float(x) and int(float(x)) or float(x) class PDFStartxref(PDFString): def", "def _setdata(self, data): assert data.tag == 'data' self[1] = data", "string = create_leaf('string', \"Felipe\") entry = create_tree('entry',[name,string]) dictionary = create_tree('dictionary',[entry])", "namespace['entry'] = PDFEntry namespace['dictionary'] = PDFDictionary namespace['stream'] = PDFStream namespace['pdf']", "text()=\"ObjStm\"]/../../../..')] def expandAllObjStm(self): for ref in self.findAllObjStm(): self.expandObjStm(ref) #Factory class", "(%d)'%g self.text = \"%d %d\"%(n,g) def to_python(self): return tuple([int(i) for", "def from_python(self, value): assert type(value) == bool, 'Value must be", "and g' assert n >= 0 and n < 65535", "if str == type(key): self.xpath('./indirect_object[@obj=\"%s\"]'%key)[0][:]=[val] #mmm else: super(PDFDictionary,self).__setitem__(key,val) def getObjectAt(self,", "PDFData(PDFString): pass class PDFBool(PDFString): def from_python(self, value): assert type(value) ==", "io_objstm = self[ref] assert io_objstm.object.dictionary['Type'].value == 'ObjStm' #completelly defilter the", "(ID) print \"MD5 update 4\", ID.encode('hex') #f) If document metadata", "encrypted_bytes[:block_size] plain_text = AES.new(key_bytes, mode, iv_bytes).decrypt(encrypted_bytes[block_size:]) pad = ord(plain_text[-1]) return", "_to_xml(self): return etree.tostring(self) xml = property(_to_xml) def _from_python(self, value): self.from_python(value)", "= self.getTrailer(startxref).value if trailer.has_key('ID'): return trailer['ID'] else: return ['',''] def", "ID.encode('hex') #f) If document metadata is not being encrypted, pass", "property(_getkey,_setkey,None) def _getval(self): return self[1] def _setval(self, val): self[1] =", "self.object.value) def _getobject(self): return self[0] def _setobject(self, o): self[0] =", "def __init__(self): self.parser = etree.XMLParser() fallback = etree.ElementDefaultClassLookup(PDFXML) lookup =", "''' trailer = self.getTrailer(startxref).value if trailer.has_key('ID'): return trailer['ID'] else: return", "self.pdf_update[-1].startxref[-1] #FIX move all this to pdf_update and do the", "from_python(self, value): assert type(value) == bool, 'Value must be a", "encrypt_py['EncryptMetadata'] == false: m.update('\\xff'*4) print \"MD5 update 5\", ('\\xff'*4).encode('hex') print", "== type(key): key = \"%d %d\"%key return self.remove(self.xpath('./indirect_object[@id=\"%s\"]'%key)[0]) return super(PDFUpdate,self).__delitem__(key)", "_setkey(self, key): assert key.tag == 'name' self[0] = 
key key", "PDFStartxref namespace['data'] = PDFData #trees namespace['entry'] = PDFEntry namespace['dictionary'] =", "supported\" #password length n = encrypt_py['Length']/8 print \"N:\",n #a) Pad", "update 5\", ('\\xff'*4).encode('hex') print \"1rst DIGEST:\", m.digest().encode('hex') h = m.digest()[:n]", "self.xpath('./indirect_object')]) def has_key(self,key): key = \"%d %d\"%key return len(self.xpath('./indirect_object[@id=\"%s\"]'%key))>0 def", "print \"MD5 update 5\", ('\\xff'*4).encode('hex') print \"1rst DIGEST:\", m.digest().encode('hex') h", "filters=[filters] params=params and [params] or [{}] if params == None:", "hash function and pass the result of step (a) as", "xref[0].tag == 'dictionary' return xref[0] def getID(self, startxref=None): ''' Get", "object %s does not match the actual data size (%d", "<%s> and it gives data <%s>\"%(i,_buf.encode('hex'),_key.encode('hex'),_buf1.encode('hex')) _buf = _buf1 assert", "__init__(self): self.parser = etree.XMLParser() fallback = etree.ElementDefaultClassLookup(PDFXML) lookup = etree.ElementNamespaceClassLookup(fallback)", "be replaced by its childs ''' for u in self.pdf_update:", "the password string to exactly 32 bytes. user_password = \"\"", "256 box[i], box[x] = box[x], box[i] x = 0 y", "= create_tree('stream',[dictionary,stream_data]) indirect = create_tree('indirect_object', [stream], obj=(1,0)) array = create_tree('array',", "hashlib.md5() m.update((user_password+pad)[:32]) print \"MD5 update 1\", ((user_password+pad)[:32]).encode('hex') #c) Pass the", "\"Wrong number of integer in the ObjStm begining\" pointers =", "the pdf ''' dotdata = \"digraph {\\n\" nodes_added = set()", "pass #tree class PDFEntry(PDFXML): def to_python(self): return tuple([e.value for e", "\\ type(dictionary['Filter']) == PDFArray and \\ len(dictionary['Filter']) == 0: deletion_list.append((dictionary,", "\"%s\";\\n'%(orig, dest) nodes_added.add(orig) nodes_added.add(dest) try: root = \"%d %d\"%self.getRoot() dotdata", "(self.id, self.object.value) def _getobject(self): return self[0] def _setobject(self, o): self[0]", "def to_python(self): return [e.value for e in self] class PDFIndirect(PDFXML):", "and text()=\"Type\"]/../name[position()=2 and text()=\"ObjStm\"]/../../../..')] def expandAllObjStm(self): for ref in self.findAllObjStm():", "16 key_size = 32 def encrypt(plain_text,key_bytes): assert len(key_bytes) == key_size", "self.set('id', \"%d %d\"%o) id = property(_getid,_setid,None) def isStream(self): return len(self.xpath('./stream'))==1", "def encrypt(plain_text,key_bytes): assert len(key_bytes) == key_size mode = AES.MODE_CBC pad", "update!)''' if startxref == None: startxref = self.getStartxref().value xref =", "def to_python(self): return tuple([int(i) for i in self.text.split(' ')]) def", "len(self.xpath('./stream'))==1 class PDFPdf(PDFXML): def to_python(self): return [e.value for e in", "del self.attrib['span'] for child in self.getchildren(): child.clear_span() span = property(_getspan,_setspan)", "key_size mode = AES.MODE_CBC iv_bytes = encrypted_bytes[:block_size] plain_text = AES.new(key_bytes,", "''' Return true if pdf is encrypted ''' return self.getTrailer().has_key('Encrypt')", "self.dictionary.value filters = val.get('Filter',None) params = val.get('DecodeParams',None) assert any([type(filters) ==", "xml = self.parser.makeelement(tag) xml.value=value xml.span=attribs.setdefault('span', (0xffffffff,-1)) del attribs['span'] for attr_key,", "')]) def _setid(self, o): self.set('id', \"%d %d\"%o) id = property(_getid,_setid,None)", 
"len(key_bytes) == key_size mode = AES.MODE_CBC pad = block_size -", "should be a dictionary.. or null?' assert len(filters) == len(params),'Number", "should match Filters' return zip(filters,params) def popFilter(self): dictionary = self.dictionary", "i in range(0,len(pointers),2) ]) positions = sorted(pointers.keys() + [len(data)]) parsed_objects", "== len(params),'Number of Decodeparams should match Filters' return zip(filters,params) def", "decoding params' selected_params = dictionary['DecodeParms'][0] deletion_list.append((dictionary['DecodeParms'],0)) #del dictionary['DecodeParms'][0] else: selected_filter", "layed in the file ''' def _getspan(self): return tuple([int(i) for", "supported\" assert encrypt_py['R'] == 4, \"Sorry only Version 4 supported\"", "the referenced indirect object in the containing pdf ''' pdf", "#Factory class PDFXMLFactory(): def __init__(self): self.parser = etree.XMLParser() fallback =", "try to decrypt it ... assert encrypt_py['V'] == 4, \"Sorry", "isEncrypted(self): ''' Return true if pdf is encrypted ''' return", "match the actual data size (%d != %d)\"%(str(self.get_numgen()),dictionary['Length'].value,len(self.data.value))) if type(dictionary['Filter'])", "'pdf_update']: return lambda payload, **my_kwargs: self.create_tree(tag, *payload, **my_kwargs) return super(PDFXMLFactory,self).__getattr__(tag,*args,**kwargs)", "n,g = xml_element.get_numgen() m = hashlib.md5() m.update(key) m.update(chr(n&0xff)) m.update(chr((n>>8)&0xff)) m.update(chr((n>>16)&0xff))", "(struct.pack(\"<L\", 0xffffffff&encrypt_py['P'] )) #e) append ID ? #TODO, get the", "range(0,50): h = hashlib.md5(h[:n]).digest() print \"Encryption KEY(%d)\"%i, h.encode('hex') key =", "if pdf is encrypted ''' return self.getTrailer().has_key('Encrypt') def countObjStm(self): '''", "self.to_python() value = property(_to_python,_from_python) def __getattr__(self, name): tags = set([e.tag", "= ''.join([chr(ord(k)^i) for k in list(key)]) _buf1 = rc4crypt(_buf,_key) print", "dictionary.has_key('DecodeParms'): assert dictionary['DecodeParms'] == PDFArray, 'Array of filters need array", "''' return self.getTrailer().has_key('Encrypt') def countObjStm(self): ''' Count number of 'compressed'", "and text()=\"ObjStm\"]/../../..')) def expandObjStm(self, ref): io_objstm = self[ref] assert io_objstm.object.dictionary['Type'].value", "defilter <%s> stream (exception %s).\"%(self.value,str(e))) logger.info(\"Couldn't defilter <%s> stream.\"%str(self.get_numgen())) def", "assert not self.isFiltered(), \"ObjStm should not be compressed at this", "two numbers, n and g' assert n >= 0 and", "(begin+offset,end+offset) if recursive: for child in self.getchildren(): child.span_move(offset) def span_expand(self,span):", "box[x], box[y] = box[y], box[x] out.append(chr(ord(char) ^ box[(box[x] + box[y])", "PDFData #trees namespace['entry'] = PDFEntry namespace['dictionary'] = PDFDictionary namespace['stream'] =", "and \\ type(dictionary['DecodeParms']) == PDFArray and \\ len(dictionary['DecodeParms']) == 0:", "= val.get('Filter',None) params = val.get('DecodeParams',None) assert any([type(filters) == list and", "its childs for new_io in expanded_iobjects: io_objstm.addnext(new_io) self.remove(io_objstm) def findAllObjStm(self):", "[e.value for e in self] class PDFIndirect(PDFXML): def to_python(self): assert", "create_tree('stream',[dictionary,stream_data]) indirect = create_tree('indirect_object', [stream], obj=(1,0)) array = create_tree('array', [create_leaf('number',", "def _getdata(self): return self[1] def 
class PDFStream(PDFXML):
    def to_python(self):
        return {'dictionary':self[0].value, 'data':self[1].value}
    def _getdictionary(self):
        return self[0]
    def _setdictionary(self, d):
        assert d.tag == 'dictionary'
        self[0] = d
    dictionary = property(_getdictionary,_setdictionary,None)
    def _getdata(self):
        return self[1]
    def _setdata(self, data):
        assert data.tag == 'data'
        self[1] = data
    data = property(_getdata,_setdata,None)
    def isFiltered(self):
        ''' Check if stream is filtered '''
        return self.dictionary.has_key('Filter')
    def getFilters(self):
        val = self.dictionary.value
        filters = val.get('Filter',None)
        params = val.get('DecodeParms',None)    # /DecodeParms is the pdf key
        assert any([ type(filters) == list and (type(params) == list or params==None),
                     type(filters) != list and (type(params) == dict or params==None) ]), 'Filter/DecodeParms wrong type'
        if type(filters) != list:
            filters=[filters]
            params=params and [params] or [{}]
        if params == None:
            params = [{}]*len(filters)
        assert all([type(x)==str for x in filters]), 'Filters shall be names'
        assert all([type(x)==dict for x in params]), 'Params should be a dictionary.. or null?'
        assert len(filters) == len(params),'Number of DecodeParms should match Filters'
        return zip(filters,params)
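    # /Filter may be a single name or an array of names, with /DecodeParms (if
    # present) shaped the same way, e.g. (illustrative):
    #   << /Filter /FlateDecode /Length 10 >>
    #   << /Filter [/ASCIIHexDecode /FlateDecode] /DecodeParms [null null] >>
    # getFilters above normalizes both shapes to a list of (filter, params) pairs.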
assert len(filters)", "assert len(key_bytes) == key_size mode = AES.MODE_CBC pad = block_size", "return None class PDFR(PDFString): def from_python(self, (n,g)): assert type(n) ==", "found at certain byte position ''' return self.xpath('//*[starts-with(@span,\"%d~\")]'%pos)[0] def getTrailer(self,", "pdf element ''' if self.tag.startswith('indirect'): return self.id else: return self.getparent().get_numgen()", "create_leaf('string', \"Felipe\") entry = create_tree('entry',[name,string]) dictionary = create_tree('dictionary',[entry]) stream_data =", "#f) If document metadata is not being encrypted, pass 4", "= property(_getid,_setid,None) def isStream(self): return len(self.xpath('./stream'))==1 class PDFPdf(PDFXML): def to_python(self):", "m.update(chr(n&0xff)) m.update(chr((n>>8)&0xff)) m.update(chr((n>>16)&0xff)) m.update(chr(g&0xff)) m.update(chr((g>>8)&0xff)) m.update(\"sAlT\") real_key = m.digest() pld", "all([type(x)==dict for x in params]), 'Params should be a dictionary..", "= box[y], box[x] out.append(chr(ord(char) ^ box[(box[x] + box[y]) % 256]))", "user_password = \"\" pad = \"<PASSWORD>\".decode('hex') print \"PASSWORD: \", user_password.encode('hex')", "_setid(self, o): self.set('id', \"%d %d\"%o) id = property(_getid,_setid,None) def isStream(self):", "(should be at least one) ''' return self.pdf_update[-1].startxref[-1] #FIX move", "h = m.digest()[:n] for i in range(0,50): h = hashlib.md5(h[:n]).digest()", "% 256 y = (y + box[x]) % 256 box[x],", "0: dotdata += '\\t\"%s\";\\n'%x nodes_added.add(orig) else: for r in references:", "= _buf1 assert _buf == encrypt_py['U'][:16] print \"Authenticated! (An actual", "for e in self.xpath('//stream/data'): decrypt_xml(e) for e in self.xpath('//string'): decrypt_xml(e)", "hash function, low-order byte first. WTF!!?? print \"MD5 update 3\",", "indirect object in the containing pdf ''' pdf = self.xpath('/*')[0]", "% len(key)])) % 256 box[i], box[x] = box[x], box[i] x", "\"Got wrong tree tag: %s\"%tag xml = self.parser.makeelement(tag) xml.span=attribs.setdefault('span', (0xffffffff,-1))", "create_leaf(tag, value, **kwargs): return PDF.create_leaf(tag, value,**kwargs) def create_tree(tag, childs, **kwargs):", "and \\ type(dictionary['Filter']) == PDFArray and \\ len(dictionary['Filter']) == 0:", "= PDFPdf namespace['pdf_update'] = PDFUpdate namespace['indirect_object'] = PDFIndirect namespace['array'] =", "dictionary['Filter'] if dictionary.has_key('DecodeParms'): selected_params = dictionary['DecodeParms'] deletion_list.append((dictionary, 'DecodeParms')) #del dictionary['DecodeParms']", "struct.pack(\"<L\", 0xffffffff&encrypt_py['P']).encode('hex') m.update (struct.pack(\"<L\", 0xffffffff&encrypt_py['P'] )) #e) append ID ?", "#TODO, get the ID from the trailer.. 
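    # Note: each popFilter() call consumes exactly one filter, left to right,
    # so defilter() keeps popping until the /Filter entry is gone (or a filter
    # fails, which is only logged).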
    def isObjStm(self):
        ''' Return true if this is an object stream (ObjStm) '''
        return self.dictionary.has_key('Type') and self.dictionary['Type'].value == 'ObjStm'

    def expandObjStm(self):
        ''' This parses the ObjStm structure and replaces it with all the
            new indirect objects. '''
        from opaflib.parser import parse
        assert not self.isFiltered(), "ObjStm should not be compressed at this point"
        assert self.dictionary.has_key('N'), "N is mandatory in ObjStm dictionary"
        assert self.dictionary.has_key('First'), "First is mandatory in ObjStm dictionary"
        dictionary = self.dictionary
        data = self.data.value
        first = dictionary["First"].value
        pointers = [int(x) for x in data[:first].split()]
        assert len(pointers)%2 == 0 , "Wrong number of integers in the ObjStm beginning"
        pointers = dict([(pointers[i+1]+first,pointers[i]) for i in range(0,len(pointers),2) ])
        positions = sorted(pointers.keys() + [len(data)])
        parsed_objects = []
        for p in range(0,len(positions)-1):
            logger.info("Adding new object %s from objectstream"%repr((pointers[positions[p]],0)))
            io = PDF.indirect_object(parse('object', data[positions[p]:positions[p+1]]+" "))
            io.id = (pointers[positions[p]],0)
            parsed_objects.append(io)
        return parsed_objects
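# The ObjStm header is just N pairs of "object-number offset" integers, with
# offsets relative to /First. A standalone sketch of the slicing expandObjStm
# does, on a made-up two-object stream (demo helper, not part of opaflib):
def _demo_objstm_slicing():
    data = "11 0 12 6 <<1>> (hi) "
    first = 10                                    # /First: header is data[:10]
    ints = [int(x) for x in data[:first].split()]             # [11, 0, 12, 6]
    pairs = dict([(ints[i+1]+first, ints[i]) for i in range(0, len(ints), 2)])
    positions = sorted(pairs.keys() + [len(data)])
    chunks = [(pairs[p], data[p:q]) for p, q in zip(positions[:-1], positions[1:])]
    assert chunks == [(11, "<<1>> "), (12, "(hi) ")]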
class PDFIndirect(PDFXML):
    def to_python(self):
        assert len(self.getchildren())==1, "Wrong number of children in indirect object"
        return (self.id, self.object.value)
    def _getobject(self):
        return self[0]
    def _setobject(self, o):
        self[0] = o
    object = property(_getobject,_setobject,None)
    def _getid(self):
        return tuple([int(i) for i in self.get('id').split(' ')])
    def _setid(self, o):
        self.set('id', "%d %d"%o)
    id = property(_getid,_setid,None)
    def isStream(self):
        return len(self.xpath('./stream'))==1

class PDFArray(PDFXML):
    def to_python(self):
        return [e.value for e in self]
class PDFPdf(PDFXML):
    def to_python(self):
        return [e.value for e in self]
    def getStartxref(self):
        ''' Get the last startxref pointer (should be at least one) '''
        return self.pdf_update[-1].startxref[-1]
    #FIX move all this to pdf_update and do the wrapper here
    def getObjectAt(self, pos):
        ''' Get the object found at certain byte position '''
        return self.xpath('//*[starts-with(@span,"%d~")]'%pos)[0]
    def getTrailer(self, startxref=None):
        ''' Get the Trailer dictionary (should be at least one) '''
        if startxref == None:
            startxref = self.getStartxref().value
        xref = self.getObjectAt(startxref)
        assert xref.tag in ['xref', 'stream'] and xref[0].tag == 'dictionary'
        return xref[0]
    def getID(self, startxref=None):
        ''' Get the pdf ID from the trailer dictionary '''
        trailer = self.getTrailer(startxref).value
        if trailer.has_key('ID'):
            return trailer['ID']
        else:
            return ['','']
    def getIndirectObject(self, ref):
        ''' Search for an indirect object '''
        for u in self.pdf_update:
            if u.has_key(ref):
                return u[ref]
    def getRoot(self):
        ''' Get the pdf Root node. '''
        return self.getIndirectObject(self.getTrailer()['Root'].value).object
    def isEncrypted(self):
        ''' Return true if pdf is encrypted '''
        return self.getTrailer().has_key('Encrypt')
    def countObjStm(self):
        ''' Count number of 'compressed' object streams '''
        return len(self.xpath('//stream/dictionary/entry/name[position()=1 and text()="Type"]/../name[position()=2 and text()="ObjStm"]/../../..'))
    def countIObj(self):
        ''' Count number of indirect objects '''
        return len(self.xpath('//indirect_object'))
    def graph(self, dot='default.dot'):
        ''' Generate a .dot graph of the pdf '''
        dotdata = "digraph {\n"
        nodes_added = set()
        for u in self.pdf_update:
            for io in u.indirect_object:
                references = io.xpath(".//R")
                orig = "%d %d"%io.id
                if len(references) == 0:
                    dotdata += '\t"%s";\n'%orig
                    nodes_added.add(orig)
                else:
                    for r in references:
                        dest = "%d %d"%r.value
                        dotdata += '\t"%s" -> "%s";\n'%(orig, dest)
                        nodes_added.add(orig)
                        nodes_added.add(dest)
        try:
            root = "%d %d"%self.getTrailer()['Root'].value
            dotdata += '\t"%s";\n'%root
        except Exception,e :
            pass
        dotdata += '}\n'
        logger.info("Writing graph to %s (a dot file). Download graphviz or try http://rise4fun.com/Agl to render it."%dot)
        file(dot,"w").write(dotdata)

    def expandAllObjStm(self):
        ''' Find all object streams and expand them. Each ObjStm will be
            replaced by its children '''
        for u in self.pdf_update:
            for ref in u.findAllObjStm():
                u.expandObjStm(ref)

    def defilterAll(self):
        ''' Find all filtered streams and defilter them '''
        for u in self.pdf_update:
            for io in u.indirect_object:
                if io.isStream() and io.object.isFiltered():
                    io.object.defilter()
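    # For a pdf where object 1 0 references 2 0 and 2 0 references nothing,
    # graph() above emits dot text like (illustrative):
    #   digraph {
    #       "1 0" -> "2 0";
    #       "2 0";
    #   }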
    def decrypt(self):
        ''' This will try to decrypt V:4 null password encryption '''
        import hashlib, struct
        from Crypto.Cipher import AES
        from Crypto.Util import randpool
        import base64

        def rc4crypt(data, key):
            x = 0
            box = range(256)
            for i in range(256):
                x = (x + box[i] + ord(key[i % len(key)])) % 256
                box[i], box[x] = box[x], box[i]
            x = 0
            y = 0
            out = []
            for char in data:
                x = (x + 1) % 256
                y = (y + box[x]) % 256
                box[x], box[y] = box[y], box[x]
                out.append(chr(ord(char) ^ box[(box[x] + box[y]) % 256]))
            return ''.join(out)

        block_size = 16
        key_size = 32

        def encrypt(plain_text,key_bytes):
            assert len(key_bytes) == key_size
            mode = AES.MODE_CBC
            pad = block_size - len(plain_text) % block_size
            data = plain_text + pad * chr(pad)
            iv_bytes = randpool.RandomPool(512).get_bytes(block_size)
            encrypted_bytes = iv_bytes + AES.new(key_bytes, mode, iv_bytes).encrypt(data)
            return encrypted_bytes

        def decrypt(encrypted_bytes,key_bytes):
            #assert len(key_bytes) == key_size
            mode = AES.MODE_CBC
            iv_bytes = encrypted_bytes[:block_size]
            plain_text = AES.new(key_bytes, mode, iv_bytes).decrypt(encrypted_bytes[block_size:])
            pad = ord(plain_text[-1])
            return plain_text[:-pad]

        assert self.isEncrypted()
        #Get and print the encryption dictionary
        encrypt = self.getTrailer()['Encrypt'].solve().object
        print "It's ENCRYPTED!"
        encrypt_py = encrypt.value
        print encrypt_py
        #Ok try to decrypt it ...
        assert encrypt_py['V'] == 4, "Sorry only Version 4 supported"
        assert encrypt_py['R'] == 4, "Sorry only Revision 4 supported"
        #password length
        n = encrypt_py['Length']/8
        print "N:",n

        #a) Pad or truncate the password string to exactly 32 bytes.
        user_password = ""
        #standard 32-byte padding string (PDF spec, 7.6.3.3)
        pad = "28BF4E5E4E758A4164004E56FFFA01082E2E00B6D0683E802F0CA9FE6453697A".decode('hex')
        print "PASSWORD: ", user_password.encode('hex')
        print "PAD: ", pad.encode('hex')

        #b) Initialize the MD5 hash function and pass the result of step (a) as input to this function.
        m = hashlib.md5()
        m.update((user_password+pad)[:32])
        print "MD5 update 1", ((user_password+pad)[:32]).encode('hex')

        #c) Pass the value of the encryption dictionary's O entry to the MD5 hash function.
        m.update (encrypt_py['O'][:32])
        print "MD5 update 2", (encrypt_py['O'][:32]).encode('hex')

        #d) Convert the integer value of the P entry to a 32-bit unsigned binary
        #   number and pass these bytes to the MD5 hash function, low-order byte first. WTF!!??
        print "MD5 update 3", struct.pack("<L", 0xffffffff&encrypt_py['P']).encode('hex')
        m.update (struct.pack("<L", 0xffffffff&encrypt_py['P'] ))

        #e) append ID ?
        #TODO, get the ID from the trailer..
        ID = ''
        m.update (ID)
        print "MD5 update 4", ID.encode('hex')

        #f) If document metadata is not being encrypted, pass 4 bytes with the
        #   value 0xFFFFFFFF to the MD5 hash function.
        if encrypt_py.has_key('EncryptMetadata') and encrypt_py['EncryptMetadata'] == False:
            m.update('\xff'*4)
            print "MD5 update 5", ('\xff'*4).encode('hex')

        print "1st DIGEST:", m.digest().encode('hex')
        h = m.digest()[:n]
        for i in range(0,50):
            h = hashlib.md5(h[:n]).digest()
            print "Encryption KEY(%d)"%i, h.encode('hex')
        key = h[:n]
        print "Encryption KEY", key.encode('hex')

        print "Try to authenticate"
        _buf = hashlib.md5(pad + ID).digest()
        print "MD5(padding+ID):",_buf.encode('hex')
        for i in range(0,20):
            _key = ''.join([chr(ord(k)^i) for k in list(key)])
            _buf1 = rc4crypt(_buf,_key)
            print "RC4 iter(%d) Encrypt data <%s> with key <%s> and it gives data <%s>"%(i,_buf.encode('hex'),_key.encode('hex'),_buf1.encode('hex'))
            _buf = _buf1
        assert _buf == encrypt_py['U'][:16]
        print "Authenticated! (An actual pass is not needed. Using null pass '' )"
        print "U", encrypt_py['U'].encode('hex')
        print "O", encrypt_py['O'].encode('hex')

        def decrypt_xml(xml_element):
            n,g = xml_element.get_numgen()
            m = hashlib.md5()
            m.update(key)
            m.update(chr(n&0xff))
            m.update(chr((n>>8)&0xff))
            m.update(chr((n>>16)&0xff))
            m.update(chr(g&0xff))
            m.update(chr((g>>8)&0xff))
            m.update("sAlT")
            real_key = m.digest()
            pld = xml_element.value
            if pld.endswith("\x0d\x0a"):
                pld = pld[:-2]
            pld = decrypt(pld,real_key)
            xml_element.value = pld

        #decrypt every string and stream in place...
        for e in self.xpath('//stream/data'):
            decrypt_xml(e)
        for e in self.xpath('//string'):
            decrypt_xml(e)
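# The revision-4 key derivation above finishes by re-hashing the first n bytes
# of the digest 50 times. A standalone sketch of just that loop with a made-up
# seed (demo helper, not part of the opaflib API):
def _demo_key_derivation_tail(seed="0123456789abcdef", n=16):
    import hashlib
    h = hashlib.md5(seed).digest()[:n]
    for _ in range(50):
        h = hashlib.md5(h[:n]).digest()
    return h[:n]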
class PDFUpdate(PDFXML):
    def to_python(self):
        return dict([e.value for e in self.xpath('./indirect_object')])
    def has_key(self,key):
        key = "%d %d"%key
        return len(self.xpath('./indirect_object[@id="%s"]'%key))>0
    def __getitem__(self, key):
        if tuple == type(key):
            key = "%d %d"%key
            return self.xpath('./indirect_object[@id="%s"]'%key)[0]
        return super(PDFUpdate,self).__getitem__(key)
    def __delitem__(self, key):
        if tuple == type(key):
            key = "%d %d"%key
            return self.remove(self.xpath('./indirect_object[@id="%s"]'%key)[0])
        return super(PDFUpdate,self).__delitem__(key)
    def __setitem__(self, key, val):
        if str == type(key):
            self.xpath('./indirect_object[@obj="%s"]'%key)[0][:]=[val] #mmm
        else:
            super(PDFUpdate,self).__setitem__(key,val)
    def getObjectAt(self, pos):
        ''' Get the object found at certain byte position (only in this update!)'''
        return self.xpath('.//*[starts-with(@span,"%d~")]'%pos)[0]
    def getTrailer(self, startxref=None):
        ''' Get the Trailer dictionary (of this update!)'''
        if startxref == None:
            startxref = self.getStartxref().value
        xref = self.getObjectAt(startxref)
        return xref.dictionary
    def getRoot(self):
        ''' Get the pdf Root node of this update. '''
        return self[self.getTrailer()['Root'].value].object
    def countObjStm(self):
        ''' Count number of 'compressed' object streams '''
        return len(self.xpath('.//stream/dictionary/entry/name[position()=1 and text()="Type"]/../name[position()=2 and text()="ObjStm"]/../../..'))
    def expandObjStm(self, ref):
        io_objstm = self[ref]
        assert io_objstm.object.dictionary['Type'].value == 'ObjStm'
        #completely defilter the object stream
        while io_objstm.object.isFiltered():
            io_objstm.object.popFilter()
        #parse the indirect simple objects inside it
        expanded_iobjects = io_objstm.object.expandObjStm()
        #replace the object stream by its children
        for new_io in expanded_iobjects:
            io_objstm.addnext(new_io)
        self.remove(io_objstm)
    def findAllObjStm(self):
        ''' Search 'compressed' object streams ids/refs'''
        return [io.id for io in self.xpath('.//stream/dictionary/entry/name[position()=1 and text()="Type"]/../name[position()=2 and text()="ObjStm"]/../../../..')]
    def expandAllObjStm(self):
        for ref in self.findAllObjStm():
            self.expandObjStm(ref)
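# PDFUpdate addresses indirect objects through their "@id" attribute: the
# tuple (1, 0) becomes the string "1 0". A minimal lxml-only sketch of that
# lookup (demo helper, not part of the opaflib API):
def _demo_update_lookup():
    from lxml import etree as _etree
    u = _etree.fromstring('<pdf_update><indirect_object id="1 0"/></pdf_update>')
    key = "%d %d" % (1, 0)
    assert len(u.xpath('./indirect_object[@id="%s"]' % key)) == 1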
#Factory
class PDFXMLFactory(object):
    def __init__(self):
        self.parser = etree.XMLParser()
        fallback = etree.ElementDefaultClassLookup(PDFXML)
        lookup = etree.ElementNamespaceClassLookup(fallback)
        namespace = lookup.get_namespace(None)
        #leafs
        namespace['name'] = PDFName
        namespace['string'] = PDFString
        namespace['number'] = PDFNumber
        namespace['null'] = PDFNull
        namespace['bool'] = PDFBool
        namespace['R'] = PDFR
        namespace['header'] = PDFHeader
        namespace['startxref'] = PDFStartxref
        namespace['data'] = PDFData
        #trees
        namespace['entry'] = PDFEntry
        namespace['dictionary'] = PDFDictionary
        namespace['stream'] = PDFStream
        namespace['pdf'] = PDFPdf
        namespace['pdf_update'] = PDFUpdate
        namespace['indirect_object'] = PDFIndirect
        namespace['array'] = PDFArray
        self.parser.set_element_class_lookup(lookup)

    #leaf
    def create_leaf(self, tag, value,**attribs):
        assert tag in ['number','string','name','R','startxref','header','data','null','bool'], "Got wrong leaf tag: %s"%tag
        xml = self.parser.makeelement(tag)
        xml.value=value
        xml.span=attribs.setdefault('span', (0xffffffff,-1))
        del attribs['span']
        for attr_key, attr_val in attribs.items():
            xml.set(attr_key, str(attr_val))
        return xml

    #Tree
    def create_tree(self, tag, *childs, **attribs):
        assert tag in ['indirect_object','dictionary', 'entry', 'array', 'stream', 'xref', 'pdf', 'pdf_update'], "Got wrong tree tag: %s"%tag
        xml = self.parser.makeelement(tag)
        xml.span=attribs.setdefault('span', (0xffffffff,-1))
        del attribs['span']
        for attr_key, attr_val in attribs.items():
            xml.set(attr_key, str(attr_val))
        for child in childs:
            xml.append(child)
        return xml

    def __getattr__(self,tag, *args,**kwargs):
        if tag in ['number','string','name','R','startxref','header','data','null','bool']:
            return lambda payload, **my_kwargs: self.create_leaf(tag, payload, **my_kwargs)
        if tag in ['indirect_object','dictionary', 'entry', 'array', 'stream', 'xref', 'pdf', 'pdf_update']:
            return lambda payload, **my_kwargs: self.create_tree(tag, *payload, **my_kwargs)
        return super(PDFXMLFactory,self).__getattr__(tag,*args,**kwargs)
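# The __getattr__ dispatch above is what makes calls like
# PDF.indirect_object(...) in expandObjStm work: any known leaf tag becomes a
# create_leaf call and any known tree tag a create_tree call, e.g.
# (illustrative) PDF.name("Root") or PDF.array([...]).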
Download graphviz or try this http://rise4fun.com/Agl for render", "the object stream while io_objstm.object.isFiltered(): io_objstm.object.popFilter() #parse the indirect simpe", "io_objstm.object.popFilter() #parse the indirect simpe objects inside it expanded_iobjects =", "Count number of 'compressed' object streams ''' return len(self.xpath('//stream/dictionary/entry/name[position()=1 and", "from_python(self, (n,g)): assert type(n) == int and type(g) == int,", "m.digest().encode('hex') h = m.digest()[:n] for i in range(0,50): h =", "class PDFXMLFactory(): def __init__(self): self.parser = etree.XMLParser() fallback = etree.ElementDefaultClassLookup(PDFXML)", "self.text == 'null', 'PDFNull xml not initialized' return None class", "self.pdf_update: if u.has_key(ref): return u[ref] def getRoot(self): ''' Get the", "references = io.xpath(\".//R\") orig = \"%d %d\"%io.id if len(references) ==", "ord(plain_text[-1]) return plain_text[:-pad] assert self.isEncrypted() #Get and print the encryption", "= e.value if pld.endswith(\"\\x0d\\x0a\"): pld = pld[:-2] pld = decrypt(pld,real_key)", "io.xpath(\".//R\") orig = \"%d %d\"%io.id if len(references) == 0: dotdata", "or float(x) class PDFStartxref(PDFString): def from_python(self, value): assert type(value) ==", "*payload, **my_kwargs) return super(PDFXMLFactory,self).__getattr__(tag,*args,**kwargs) PDF = PDFXMLFactory() def create_leaf(tag, value,", "pdf = self.xpath('/*')[0] return pdf.getIndirectObject(self.value) class PDFNumber(PDFXML): def from_python(self, value):", "self.xpath('.//*[starts-with(@span,\"%d~\")]'%pos)[0] def getTrailer(self, startxref=None): ''' Get the Trailer dictionary (of", "the actual data size (%d != %d)\"%(str(self.get_numgen()),dictionary['Length'].value,len(self.data.value))) if type(dictionary['Filter']) ==", "xref = self.getObjectAt(startxref) return xref.dictionary def getRoot(self): ''' Get the", "tags: return self.xpath('./%s'%name) return getattr(super(PDFXML,self),name) def get_numgen(self): ''' Search the", "n < 65535 , 'Invalid object number (%d)'%n assert g", "None deletion_list = [] if dictionary['Length'].value != len(self.data.value): logger.info(\"Length field", "#mmm else: super(PDFDictionary,self).__setitem__(key,val) def getObjectAt(self, pos): ''' Get the object", "startxref == None: startxref = self.getStartxref().value xref = self.getObjectAt(startxref) assert", "\\ len(dictionary['DecodeParms']) == 0: deletion_list.append((dictionary, 'DecodeParms')) #del dictionary['DecodeParms'] #FIX recode", "it register/unregister able. 
#(think /Crypt 7.4.10 Crypt Filter ) self.data.value", "''' Count number of 'compressed' object streams ''' return len(self.xpath('//stream/dictionary/entry/name[position()=1", "and text()=\"ObjStm\"]/../../..')) def countIObj(self): ''' Count number of 'compressed' object", "= \"\" pad = \"<PASSWORD>\".decode('hex') print \"PASSWORD: \", user_password.encode('hex') print", "the object stream by its childs for new_io in expanded_iobjects:", "text()=\"%s\"]'%key))>0 def __getitem__(self, i): if str == type(i): return self.xpath('./entry/name[position()=1", "class PDFDictionary(PDFXML): def to_python(self): return dict([e.value for e in self.getchildren()])", "integer value of the P entry to a 32-bit unsigned", "property(_getdata,_setdata,None) def isFiltered(self): ''' Check if stream is filtered '''", "0 and n < 65535 , 'Invalid object number (%d)'%n", "return xml def __getattr__(self,tag, *args,**kwargs): if tag in ['number','string','name','R','startxref','header','data','null','bool']: return", "\"1rst DIGEST:\", m.digest().encode('hex') h = m.digest()[:n] for i in range(0,50):", "ObjStm structure and replace it with all the new indirect", "Get the object found at certain byte position ''' return", "+= '}\\n' logger.info(\"Writing graph to %s(a dot file). Download graphviz", "to_python(self): return {'dictionary':self[0].value, 'data':self[1].value} def _getdictionary(self): return self[0] def _setdictionary(self,", "(y + box[x]) % 256 box[x], box[y] = box[y], box[x]", "has_key(self,key): key = \"%d %d\"%key return len(self.xpath('./indirect_object[@id=\"%s\"]'%key))>0 def __getitem__(self, key):", "\"O\", encrypt_py['O'].encode('hex') def decrypt_xml(xml_element): n,g = xml_element.get_numgen() m = hashlib.md5()", "__getattr__(self, name): tags = set([e.tag for e in self]) if", "<%s> stream (exception %s).\"%(self.value,str(e))) logger.info(\"Couldn't defilter <%s> stream.\"%str(self.get_numgen())) def isObjStm(self):", "and [params] or [{}] if params == None: params =", "tuple([int(i) for i in self.get('span').split('~')]) def _setspan(self, value): self.set('span',\"%d~%d\"%value) def", "''' return self.xpath('//*[starts-with(@span,\"%d~\")]'%pos)[0] def getTrailer(self, startxref=None): ''' Get the Trailer", "logger.debug(\"Couldn't defilter <%s> stream (exception %s).\"%(self.value,str(e))) logger.info(\"Couldn't defilter <%s> stream.\"%str(self.get_numgen()))", "len(self.xpath('//indirect_object')) def graph(xml_pdf,dot='default.dot'): ''' Generate a .dot graph of the", "it with all the new indirect objects. 
#leaf
class PDFString(PDFXML):
    def from_python(self, value):
        self.text = value.encode('string_escape')
    def to_python(self):
        return self.text.decode('string_escape')

class PDFName(PDFString):
    pass

class PDFData(PDFString):
    pass

class PDFBool(PDFString):
    def from_python(self, value):
        assert type(value) == bool, 'Value must be a boolean'
        self.text = ['false','true'][int(value)]
    def to_python(self):
        return {'false': False, 'true': True}[self.text]

class PDFNull(PDFString):
    def from_python(self, value):
        assert value is None, 'Value must be None'
        self.text = 'null'
    def to_python(self):
        assert self.text == 'null', 'PDFNull xml not initialized'
        return None

class PDFR(PDFString):
    def from_python(self, (n,g)):
        assert type(n) == int and type(g) == int, 'R must be two numbers, n and g'
        assert n >= 0 and n < 65535, 'Invalid object number (%d)'%n
        assert g >= 0 and g < 65535, 'Invalid generation number (%d)'%g
        self.text = "%d %d"%(n,g)
    def to_python(self):
        return tuple([int(i) for i in self.text.split(' ')])
    def solve(self):
        ''' search the referenced indirect object in the containing pdf '''
        pdf = self.xpath('/*')[0]
        return pdf.getIndirectObject(self.value)

class PDFNumber(PDFXML):
    def from_python(self, value):
        assert type(value) in [int, float], 'Wrong type for a number'
        self.text = str(value)
    def to_python(self):
        x = self.text
        return float(int(float(x))) == float(x) and int(float(x)) or float(x)

class PDFStartxref(PDFString):
    def from_python(self, value):
        assert type(value) == int, 'Wrong type for startxref'
        self.text = str(value).encode('string_escape')
    def to_python(self):
        return int(self.text.decode('string_escape'))

class PDFHeader(PDFString):
    pass
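# PDFNumber.to_python above packs an int/float coercion into a single
# and/or expression. A more explicit sketch of the same rule (the helper
# name is illustrative, not part of opaflib): "42" -> 42, "7.0" -> 7,
# "3.14" -> 3.14.
def _coerce_pdf_number(text):
    as_float = float(text)
    if float(int(as_float)) == as_float:
        return int(as_float)    # integral value: collapse to int
    return as_float             # fractional value: keep the float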
#tree
class PDFEntry(PDFXML):
    def to_python(self):
        return tuple([e.value for e in self.getchildren()])
    def _getkey(self):
        return self[0]
    def _setkey(self, key):
        assert key.tag == 'name'
        self[0] = key
    key = property(_getkey,_setkey,None)
    def _getval(self):
        return self[1]
    def _setval(self, val):
        self[1] = val
    val = property(_getval,_setval,None)

class PDFDictionary(PDFXML):
    def to_python(self):
        return dict([e.value for e in self.getchildren()])
    def has_key(self,key):
        return len(self.xpath('./entry/name[position()=1 and text()="%s"]'%key))>0
    def __getitem__(self, i):
        if str == type(i):
            return self.xpath('./entry/name[position()=1 and text()="%s"]/../*[position()=2]'%i)[0]
        return super(PDFDictionary,self).__getitem__(i)
    def __delitem__(self, i):
        if str == type(i):
            return self.remove(self.xpath('./entry/name[position()=1 and text()="%s"]/..'%i)[0])
        return super(PDFDictionary,self).__delitem__(i)
    def __setitem__(self, key, val):
        if str == type(key):
            self.xpath('./entry/name[position()=1 and text()="%s"]/..'%key)[0].val = val
        else:
            super(PDFDictionary,self).__setitem__(key,val)

class PDFStream(PDFXML):
    def to_python(self):
        return {'dictionary':self[0].value, 'data':self[1].value}
    def _getdictionary(self):
        return self[0]
    def _setdictionary(self, d):
        assert d.tag == 'dictionary'
        self[0] = d
    dictionary = property(_getdictionary,_setdictionary,None)
    def _getdata(self):
        return self[1]
    def _setdata(self, data):
        assert data.tag == 'data'
        self[1] = data
    data = property(_getdata,_setdata,None)

    def isFiltered(self):
        ''' Check if stream is filtered '''
        return self.dictionary.has_key('Filter')

    def getFilters(self):
        val = self.dictionary.value
        filters = val.get('Filter',None)
        params = val.get('DecodeParms',None)
        assert any([type(filters) == list and (type(params) == list or params==None ),
                    type(filters) != list and (type(params) == dict or params==None ) ]), 'Filter/DecodeParms wrong type'
        if type(filters) != list:
            filters = [filters]
            params = params and [params] or [{}]
        if params == None:
            params = [{}]*len(filters)
        assert all([type(x)==str for x in filters]), 'Filters shall be names'
        assert all([type(x)==dict for x in params]), 'Params should be a dictionary.. or null?'
        assert len(filters) == len(params), 'Number of DecodeParms should match Filters'
        return zip(filters,params)

    def popFilter(self):
        ''' Decode the outermost filter and remove it from the dictionary '''
        dictionary = self.dictionary
        assert dictionary.has_key('Filter'), 'Stream not Filtered!'
        selected_filter = None
        selected_params = None
        deletion_list = []
        if dictionary['Length'].value != len(self.data.value):
            logger.info("Length field of object %s does not match the actual data size (%d != %d)"%(str(self.get_numgen()),dictionary['Length'].value,len(self.data.value)))
        if type(dictionary['Filter']) == PDFArray:
            selected_filter = dictionary['Filter'][0]
            del dictionary['Filter'][0]
            if dictionary.has_key('DecodeParms'):
                assert type(dictionary['DecodeParms']) == PDFArray, 'Array of filters need array of decoding params'
                selected_params = dictionary['DecodeParms'][0]
                deletion_list.append((dictionary['DecodeParms'],0))
        else:
            selected_filter = dictionary['Filter']
            del dictionary['Filter']
            if dictionary.has_key('DecodeParms'):
                selected_params = dictionary['DecodeParms']
                deletion_list.append((dictionary, 'DecodeParms'))
        if dictionary.has_key('Filter') and \
           type(dictionary['Filter']) == PDFArray and \
           len(dictionary['Filter']) == 0:
            deletion_list.append((dictionary, 'Filter'))
        if dictionary.has_key('DecodeParms') and \
           type(dictionary['DecodeParms']) == PDFArray and \
           len(dictionary['DecodeParms']) == 0:
            deletion_list.append((dictionary, 'DecodeParms'))
        #FIX recode defilterData .. make it register/unregister able.
        #(think /Crypt 7.4.10 Crypt Filter )
        self.data.value = defilterData(selected_filter.value, self.data.value,
                                       selected_params and selected_params.value or selected_params)
        for v,i in deletion_list:
            del v[i]
        dictionary['Length'].value = len(self.data.value)

    def defilter(self):
        try:
            while self.isFiltered():
                self.popFilter()
        except Exception,e:
            logger.debug("Couldn't defilter <%s> stream (exception %s)."%(self.value,str(e)))
            logger.info("Couldn't defilter <%s> stream."%str(self.get_numgen()))

    def isObjStm(self):
        ''' Return true if this is an object stream (ObjStm) '''
        return self.dictionary.has_key('Type') and self.dictionary['Type'].value == 'ObjStm'

    def expandObjStm(self):
        ''' This parses the ObjStm structure and replaces it with all the
            new indirect objects. '''
        from opaflib.parser import parse
        assert not self.isFiltered(), "ObjStm should not be compressed at this point"
        assert self.dictionary.has_key('N'), "N is mandatory in ObjStm dictionary"
        assert self.dictionary.has_key('First'), "First is mandatory in ObjStm dictionary"
        dictionary = self.dictionary
        data = self.data.value
        first = dictionary["First"].value
        pointers = [int(x) for x in data[:first].split()]
        assert len(pointers)%2 == 0, "Wrong number of integers in the ObjStm beginning"
        pointers = dict([(pointers[i+1]+first,pointers[i]) for i in range(0,len(pointers),2)])
        positions = sorted(pointers.keys() + [len(data)])
        parsed_objects = []
        for p in range(0,len(positions)-1):
            logger.info("Adding new object %s from objectstream"%repr((pointers[positions[p]],0)))
            io = PDF.indirect_object(parse('object', data[positions[p]:positions[p+1]]+" "))
            io.id = (pointers[positions[p]],0)
            parsed_objects.append(io)
        return parsed_objects
class PDFArray(PDFXML):
    def to_python(self):
        return [e.value for e in self]

class PDFIndirect(PDFXML):
    def to_python(self):
        assert len(self.getchildren())==1, "Wrong number of children in indirect object"
        return (self.id, self.object.value)
    def _getobject(self):
        return self[0]
    def _setobject(self, o):
        self[0] = o
    object = property(_getobject,_setobject,None)
    def _getid(self):
        return tuple([int(i) for i in self.get('id').split(' ')])
    def _setid(self, o):
        self.set('id', "%d %d"%o)
    id = property(_getid,_setid,None)
    def isStream(self):
        return len(self.xpath('./stream'))==1
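# Worked example of the ObjStm layout unpacked by PDFStream.expandObjStm
# above, with hypothetical stream data (illustration only):
#
#   data  = "12 0 13 9 <</A 1>> (hi)"
#   First = 10                        # object bodies start at byte 10
#   data[:10] = "12 0 13 9 "          # pairs: object 12 at +0, object 13 at +9
#   object (12,0) = data[10:19] = "<</A 1>> "
#   object (13,0) = data[19:]   = "(hi)"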
class PDFPdf(PDFXML):
    def to_python(self):
        return [e.value for e in self]

    def getStartxref(self):
        ''' Get the last startxref pointer (should be at least one) '''
        return self.pdf_update[-1].startxref[-1]

    #FIX move all this to pdf_update and do the wrapper here
    def getObjectAt(self, pos):
        ''' Get the object found at certain byte position '''
        return self.xpath('//*[starts-with(@span,"%d~")]'%pos)[0]

    def getTrailer(self, startxref=None):
        ''' Get the Trailer dictionary (should be at least one) '''
        if startxref == None:
            startxref = self.getStartxref().value
        xref = self.getObjectAt(startxref)
        return xref.dictionary

    def getRoot(self):
        ''' Get the pdf Root node. '''
        return self.getIndirectObject(self.getTrailer()['Root'].value).object

    def isEncrypted(self):
        ''' Return true if pdf is encrypted '''
        return self.getTrailer().has_key('Encrypt')

    def getID(self, startxref=None):
        ''' Get the pdf ID from the trailer dictionary '''
        trailer = self.getTrailer(startxref).value
        if trailer.has_key('ID'):
            return trailer['ID']
        else:
            return ['','']

    def getIndirectObject(self, ref):
        ''' Search for an indirect object '''
        for u in self.pdf_update:
            if u.has_key(ref):
                return u[ref]

    def countObjStm(self):
        ''' Count number of 'compressed' object streams '''
        return len(self.xpath('//stream/dictionary/entry/name[position()=1 and text()="Type"]/../name[position()=2 and text()="ObjStm"]/../../..'))

    def countIObj(self):
        ''' Count number of indirect objects '''
        return len(self.xpath('//indirect_object'))

    def graph(self, dot='default.dot'):
        ''' Generate a .dot graph of the pdf '''
        dotdata = "digraph {\n"
        nodes_added = set()
        for u in self.pdf_update:
            for io in u.indirect_object:
                references = io.xpath(".//R")
                orig = "%d %d"%io.id
                if len(references) == 0:
                    dotdata += '\t"%s";\n'%orig
                    nodes_added.add(orig)
                else:
                    for r in references:
                        dest = "%d %d"%r.value
                        dotdata += '\t"%s" -> "%s";\n'%(orig, dest)
                        nodes_added.add(orig)
                        nodes_added.add(dest)
        try:
            root = "%d %d"%self.getTrailer()['Root'].value
            dotdata += '\t"trailer" -> "%s";\n'%root
        except Exception,e:
            pass
        dotdata += '}\n'
        logger.info("Writing graph to %s (a dot file). Download graphviz or try http://rise4fun.com/Agl to render it"%dot)
        file(dot,"w").write(dotdata)

    def expandAllObjStm(self):
        ''' Find all object streams and expand them. Each ObjStm will be
            replaced by its childs '''
        for u in self.pdf_update:
            for ref in u.findAllObjStm():
                u.expandObjStm(ref)

    def defilterAll(self):
        ''' Defilter every filtered stream found in any update '''
        for u in self.pdf_update:
            for io in u[:]:
                if type(io) == PDFIndirect and io.isStream() and io.object.isFiltered():
                    io.object.defilter()
    def decrypt(self):
        ''' This will try to decrypt V:4 null password encryption '''
        import hashlib, struct
        from Crypto.Cipher import AES
        from Crypto.Util import randpool
        import base64

        def rc4crypt(data, key):
            x = 0
            box = range(256)
            for i in range(256):
                x = (x + box[i] + ord(key[i % len(key)])) % 256
                box[i], box[x] = box[x], box[i]
            x = 0
            y = 0
            out = []
            for char in data:
                x = (x + 1) % 256
                y = (y + box[x]) % 256
                box[x], box[y] = box[y], box[x]
                out.append(chr(ord(char) ^ box[(box[x] + box[y]) % 256]))
            return ''.join(out)

        block_size = 16
        key_size = 32

        def encrypt(plain_text, key_bytes):
            assert len(key_bytes) == key_size
            mode = AES.MODE_CBC
            pad = block_size - len(plain_text) % block_size
            data = plain_text + pad * chr(pad)
            iv_bytes = randpool.RandomPool(512).get_bytes(block_size)
            encrypted_bytes = iv_bytes + AES.new(key_bytes, mode, iv_bytes).encrypt(data)
            return encrypted_bytes

        def decrypt(encrypted_bytes, key_bytes):
            #assert len(key_bytes) == key_size
            mode = AES.MODE_CBC
            iv_bytes = encrypted_bytes[:block_size]
            plain_text = AES.new(key_bytes, mode, iv_bytes).decrypt(encrypted_bytes[block_size:])
            pad = ord(plain_text[-1])
            return plain_text[:-pad]

        assert self.isEncrypted()
        #Get and print the encryption dictionary
        encrypt = self.getTrailer()['Encrypt'].solve().object
        print "It's ENCRYPTED!"
        encrypt_py = encrypt.value
        print encrypt_py

        #Ok, try to decrypt it ...
        assert encrypt_py['V'] == 4, "Sorry only Version 4 supported"
        assert encrypt_py['R'] == 4, "Sorry only Revision 4 supported"

        #password length
        n = encrypt_py['Length']/8
        print "N:", n

        #a) Pad or truncate the password string to exactly 32 bytes
        user_password = ""
        pad = "<PASSWORD>".decode('hex')
        print "PASSWORD: ", user_password.encode('hex')
        print "PAD: ", pad.encode('hex')

        #b) Initialize the MD5 hash function and pass the result of
        #   step (a) as input to this function.
        m = hashlib.md5()
        m.update((user_password+pad)[:32])
        print "MD5 update 1", ((user_password+pad)[:32]).encode('hex')

        #c) Pass the value of the encryption dictionary's O entry to the
        #   MD5 hash function.
        m.update(encrypt_py['O'][:32])
        print "MD5 update 2", (encrypt_py['O'][:32]).encode('hex')

        #d) Convert the integer value of the P entry to a 32-bit unsigned
        #   binary number and pass these bytes to the MD5 hash function,
        #   low-order byte first. WTF!!??
        print "MD5 update 3", struct.pack("<L", 0xffffffff&encrypt_py['P']).encode('hex')
        m.update(struct.pack("<L", 0xffffffff&encrypt_py['P']))

        #e) append ID ? #TODO, get the ID from the trailer..
        ID = ''
        m.update(ID)
        print "MD5 update 4", ID.encode('hex')

        #f) If document metadata is not being encrypted, pass 4 bytes with
        #   the value 0xFFFFFFFF to the MD5 hash function.
        if encrypt_py.has_key('EncryptMetadata') and encrypt_py['EncryptMetadata'] == False:
            m.update('\xff'*4)
            print "MD5 update 5", ('\xff'*4).encode('hex')

        print "1rst DIGEST:", m.digest().encode('hex')
        h = m.digest()[:n]
        for i in range(0,50):
            h = hashlib.md5(h[:n]).digest()
            print "Encryption KEY(%d)"%i, h.encode('hex')
        key = h[:n]
        print "Encryption KEY", key.encode('hex')

        print "Try to authenticate"
        _buf = hashlib.md5(pad + ID).digest()
        print "MD5(padding+ID):", _buf.encode('hex')
        for i in range(0,20):
            _key = ''.join([chr(ord(k)^i) for k in list(key)])
            _buf1 = rc4crypt(_buf,_key)
            print "RC4 iter(%d) Encrypt data <%s> with key <%s> and it gives data <%s>"%(i,_buf.encode('hex'),_key.encode('hex'),_buf1.encode('hex'))
            _buf = _buf1
        assert _buf == encrypt_py['U'][:16]
        print "Authenticated! (An actual pass is not needed. Using null pass '' )"
        print "U", encrypt_py['U'].encode('hex')
        print "O", encrypt_py['O'].encode('hex')

        def decrypt_xml(xml_element):
            n,g = xml_element.get_numgen()
            m = hashlib.md5()
            m.update(key)
            m.update(chr(n&0xff))
            m.update(chr((n>>8)&0xff))
            m.update(chr((n>>16)&0xff))
            m.update(chr(g&0xff))
            m.update(chr((g>>8)&0xff))
            m.update("sAlT")
            real_key = m.digest()
            pld = xml_element.value
            if pld.endswith("\x0d\x0a"):
                pld = pld[:-2]
            pld = decrypt(pld, real_key)
            xml_element.value = pld

        #decrypt every string and stream in place...
        for e in self.xpath('//stream/data'):
            decrypt_xml(e)
        for e in self.xpath('//string'):
            decrypt_xml(e)
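# The RC4 loop in decrypt() above works for both directions because RC4 is
# its own inverse: rc4crypt(rc4crypt(data, k), k) == data. decrypt_xml also
# derives a distinct key per object (MD5 over the file key, the low bytes of
# the object and generation numbers, plus "sAlT" for AESV2 crypt filters);
# per the PDF spec the digest is truncated to min(n+5, 16) bytes, which for
# 128-bit keys is the whole MD5 digest. A minimal standalone sketch of that
# derivation (helper name is illustrative, not part of opaflib):
def _per_object_key_sketch(file_key, n, g):
    import hashlib
    m = hashlib.md5()
    m.update(file_key)
    m.update(chr(n&0xff) + chr((n>>8)&0xff) + chr((n>>16)&0xff))
    m.update(chr(g&0xff) + chr((g>>8)&0xff))
    m.update("sAlT")    # appended only for AESV2 crypt filters
    return m.digest()[:min(len(file_key)+5, 16)]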
class PDFUpdate(PDFXML):
    def to_python(self):
        return dict([e.value for e in self.xpath('./indirect_object')])

    def has_key(self,key):
        key = "%d %d"%key
        return len(self.xpath('./indirect_object[@id="%s"]'%key))>0

    def __getitem__(self, key):
        if tuple == type(key):
            key = "%d %d"%key
            return self.xpath('./indirect_object[@id="%s"]'%key)[0]
        return super(PDFUpdate,self).__getitem__(key)

    def __delitem__(self, key):
        if tuple == type(key):
            key = "%d %d"%key
            return self.remove(self.xpath('./indirect_object[@id="%s"]'%key)[0])
        return super(PDFUpdate,self).__delitem__(key)

    def __setitem__(self, key, val):
        if str == type(key):
            self.xpath('./indirect_object[@obj="%s"]'%key)[0][:] = [val] #mmm
        else:
            super(PDFUpdate,self).__setitem__(key,val)

    def getObjectAt(self, pos):
        ''' Get the object found at certain byte position (only in this update!)'''
        return self.xpath('.//*[starts-with(@span,"%d~")]'%pos)[0]

    def getTrailer(self, startxref=None):
        ''' Get the Trailer dictionary (of this update!)'''
        if startxref == None:
            startxref = self.getStartxref().value
        xref = self.getObjectAt(startxref)
        assert xref.tag in ['xref', 'stream'] and xref[0].tag == 'dictionary'
        return xref[0]

    def getRoot(self):
        ''' Get the pdf Root node of this update. '''
        return self[self.getTrailer()['Root'].value].object

    def countObjStm(self):
        ''' Count number of 'compressed' object streams '''
        return len(self.xpath('.//stream/dictionary/entry/name[position()=1 and text()="Type"]/../name[position()=2 and text()="ObjStm"]/../../..'))

    def expandObjStm(self, ref):
        io_objstm = self[ref]
        assert io_objstm.object.dictionary['Type'].value == 'ObjStm'
        #completely defilter the object stream
        while io_objstm.object.isFiltered():
            io_objstm.object.popFilter()
        #parse the indirect simple objects inside it
        expanded_iobjects = io_objstm.object.expandObjStm()
        #replace the object stream by its childs
        for new_io in expanded_iobjects:
            io_objstm.addnext(new_io)
        self.remove(io_objstm)

    def findAllObjStm(self):
        ''' Search 'compressed' object streams ids/refs '''
        return [io.id for io in self.xpath('.//stream/dictionary/entry/name[position()=1 and text()="Type"]/../name[position()=2 and text()="ObjStm"]/../../../..')]

    def expandAllObjStm(self):
        for ref in self.findAllObjStm():
            self.expandObjStm(ref)
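# The countObjStm/findAllObjStm XPaths above walk the XML shape the parser
# builds for a compressed object stream; roughly (sketch, attributes and
# sibling entries omitted):
#
#   <indirect_object id="5 0">
#     <stream>
#       <dictionary>
#         <entry><name>Type</name><name>ObjStm</name></entry>
#         <entry><name>N</name><number>2</number></entry>
#         <entry><name>First</name><number>10</number></entry>
#       </dictionary>
#       <data>...</data>
#     </stream>
#   </indirect_object>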
"return super(PDFUpdate,self).__getitem__(key) def __delitem__(self, key): if tuple == type(key): key", "'Array of filters need array of decoding params' selected_params =", "'}\\n' logger.info(\"Writing graph to %s(a dot file). Download graphviz or", "make it register/unregister able. #(think /Crypt 7.4.10 Crypt Filter )", "= dict([(pointers[i+1]+first,pointers[i]) for i in range(0,len(pointers),2) ]) positions = sorted(pointers.keys()", "def popFilter(self): dictionary = self.dictionary assert dictionary.has_key('Filter'), 'Stream not Filtered!'", "a names' assert all([type(x)==dict for x in params]), 'Params should", "[e.value for e in self] def getStartxref(self): ''' Get the", "node. ''' return self.getIndirectObject(self.getTrailer()['Root'].value).object def isEncrypted(self): ''' Return true if", "Return true if pdf is encrypted ''' return self.getTrailer().has_key('Encrypt') def", "self.findAllObjStm(): self.expandObjStm(ref) #Factory class PDFXMLFactory(): def __init__(self): self.parser = etree.XMLParser()", "not self.isFiltered(), \"ObjStm should not be compressed at this point\"", "return lambda payload, **my_kwargs: self.create_leaf(tag, payload, **my_kwargs) elif tag in", "has_key(self,key): return len(self.xpath('./entry/name[position()=1 and text()=\"%s\"]'%key))>0 def __getitem__(self, i): if str", "self.data.value first = dictionary[\"First\"].value pointers = [int(x) for x in", "super(PDFDictionary,self).__setitem__(key,val) class PDFStream(PDFXML): def to_python(self): return {'dictionary':self[0].value, 'data':self[1].value} def _getdictionary(self):", "this function. m = hashlib.md5() m.update((user_password+pad)[:32]) print \"MD5 update 1\",", "_buf == encrypt_py['U'][:16] print \"Authenticated! (An actual pass is not", "object = property(_getobject,_setobject,None) def _getid(self): return tuple([int(i) for i in", "of 'compressed' object streams ''' return len(self.xpath('.//stream/dictionary/entry/name[position()=1 and text()=\"Type\"]/../name[position()=2 and", "first. WTF!!?? 
print \"MD5 update 3\", struct.pack(\"<L\", 0xffffffff&encrypt_py['P']).encode('hex') m.update (struct.pack(\"<L\",", "def create_leaf(self, tag, value,**attribs): assert tag in ['number','string','name','R','startxref','header','data','null','bool'], \"Got wrong", "and \\ len(dictionary['DecodeParms']) == 0: deletion_list.append((dictionary, 'DecodeParms')) #del dictionary['DecodeParms'] #FIX", "else: return self.getparent().get_numgen() #leaf class PDFString(PDFXML): def from_python(self, value): self.text", "x in filters]), 'Filter shall be a names' assert all([type(x)==dict", "def create_leaf(tag, value, **kwargs): return PDF.create_leaf(tag, value,**kwargs) def create_tree(tag, childs,", "text()=\"%s\"]/..'%i)[0]) return super(PDFDictionary,self).__delitem__(i) def __setitem__(self, key, val): if str ==", "n = encrypt_py['Length']/8 print \"N:\",n #a) Pad or truncate the", "def from_python(self, value): assert type(value) in [int, float], 'Wrong type", "self.dictionary.has_key('First'), \"First is mandatory in ObjStm dictionary\" dictionary = self.dictionary", "def decrypt(self): ''' This will try to decrypt V:4 null", "only Version 4 supported\" assert encrypt_py['R'] == 4, \"Sorry only", "to_python(self): return dict([e.value for e in self.getchildren()]) def has_key(self,key): return", "%d)\"%(str(self.get_numgen()),dictionary['Length'].value,len(self.data.value))) if type(dictionary['Filter']) == PDFArray: selected_filter = dictionary['Filter'][0] del dictionary['Filter'][0]", "self[1] = val val = property(_getval,_setval,None) class PDFDictionary(PDFXML): def to_python(self):", "print \"RC4 iter(%d) Encrypt data <%s> with key <%s> and", "= hashlib.md5() m.update(key) m.update(chr(n&0xff)) m.update(chr((n>>8)&0xff)) m.update(chr((n>>16)&0xff)) m.update(chr(g&0xff)) m.update(chr((g>>8)&0xff)) m.update(\"sAlT\") real_key", "bytes to the # MD5 hash function, low-order byte first.", "trailer.. 
ID = '' m.update (ID) print \"MD5 update 4\",", "[] for char in data: x = (x + 1)", "''' return self.pdf_update[-1].startxref[-1] #FIX move all this to pdf_update and", "\"MD5 update 2\", (encrypt_py['O'][:32]).encode('hex') #d) Convert the integer value of", "'xref', 'pdf', 'pdf_update']: return lambda payload, **my_kwargs: self.create_tree(tag, *payload, **my_kwargs)", "from lxml import etree from opaflib.filters import defilterData #Logging facility", "len(key_bytes) == key_size mode = AES.MODE_CBC iv_bytes = encrypted_bytes[:block_size] plain_text", "number of any pdf element ''' if self.tag.startswith('indirect'): return self.id", "int and type(g) == int, 'R must be two numbers,", "len(self.xpath('./indirect_object[@id=\"%s\"]'%key))>0 def __getitem__(self, key): if tuple == type(key): key =", "self.getTrailer(startxref).value if trailer.has_key('ID'): return trailer['ID'] else: return ['',''] def getIndirectObject(self,", "= val.get('DecodeParams',None) assert any([type(filters) == list and (type(params) == list", "box[y] = box[y], box[x] out.append(chr(ord(char) ^ box[(box[x] + box[y]) %", "PDF.create_tree(tag, *childs, **kwargs) if __name__==\"__main__\": name = create_leaf('name', \"Name\") string", "in range(0,20): _key = ''.join([chr(ord(k)^i) for k in list(key)]) _buf1", "indirect simpe objects inside it expanded_iobjects = io_objstm.object.expandObjStm() #replace the", "assert type(value) == int , 'Wrong type for startxref' self.text", "text()=\"Type\"]/../name[position()=2 and text()=\"ObjStm\"]/../../../..')] def expandAllObjStm(self): for ref in self.findAllObjStm(): self.expandObjStm(ref)", "from_python(self, value): assert type(value) in [int, float], 'Wrong type for", "pass class PDFBool(PDFString): def from_python(self, value): assert type(value) == bool,", "deletion_list: del v[i] dictionary['Length'].value = len(self.data.value) def defilter(self): try: while", "for e in self.xpath('//string'): decrypt_xml(e) class PDFUpdate(PDFXML): def to_python(self): return", "countIObj(self): ''' Count number of 'compressed' object streams ''' return", "this update!)''' return self.xpath('.//*[starts-with(@span,\"%d~\")]'%pos)[0] def getTrailer(self, startxref=None): ''' Get the", "o): self[0] = o object = property(_getobject,_setobject,None) def _getid(self): return", "self.xpath('//stream/data'): decrypt_xml(e) for e in self.xpath('//string'): decrypt_xml(e) class PDFUpdate(PDFXML): def", "x = 0 y = 0 out = [] for", "certain byte position (only in this update!)''' return self.xpath('.//*[starts-with(@span,\"%d~\")]'%pos)[0] def", "def from_python(self, value): assert type(value) == int , 'Wrong type", "self.text = ['false','true'][int(value)] def to_python(self): return {'false': False, 'true': True}[self.text]", "self.pdf_update: for io in u[:]: if type(io) == PDFIndirect and", "etree.ElementNamespaceClassLookup(fallback) namespace = lookup.get_namespace(None) #leafs namespace['name'] = PDFName namespace['string'] =", "h.encode('hex') key = h[:n] print \"Encryption KEY\", key.encode('hex') print \"Try", "#Logging facility import logging,code logger = logging.getLogger(\"OPAFXML\") class PDFXML(etree.ElementBase): '''", "**my_kwargs: self.create_tree(tag, *payload, **my_kwargs) return super(PDFXMLFactory,self).__getattr__(tag,*args,**kwargs) PDF = PDFXMLFactory() def", "== PDFArray: selected_filter = dictionary['Filter'][0] del dictionary['Filter'][0] if dictionary.has_key('DecodeParms'): assert", "and io.isStream() and io.object.isFiltered(): io.object.defilter() def 
decrypt(self): ''' This will", "every string and stream in place... for e in self.xpath('//stream/data'):", "self[self.getTrailer()['Root'].value].object def countObjStm(self): ''' Count number of 'compressed' object streams", "data size (%d != %d)\"%(str(self.get_numgen()),dictionary['Length'].value,len(self.data.value))) if type(dictionary['Filter']) == PDFArray: selected_filter", "the new indirect objects. ''' from opaflib.parser import parse assert", "= PDFNull namespace['bool'] = PDFBool namespace['R'] = PDFR namespace['header'] =", "]), 'Filter/DecodeParms wrong type' if type(filters) != list: filters=[filters] params=params", "result of step (a) as input to this function. m", "= [] for p in range(0,len(positions)-1): logger.info(\"Adding new object %s", "pointers = dict([(pointers[i+1]+first,pointers[i]) for i in range(0,len(pointers),2) ]) positions =", "of 'compressed' object streams ''' return len(self.xpath('//stream/dictionary/entry/name[position()=1 and text()=\"Type\"]/../name[position()=2 and", "will have a span wich indicates where the original token", "= \"%d %d\"%self.getRoot() dotdata += '\\t\"trailer\" -> \"%s\";\\n'%root except Exception,e", "len(key)])) % 256 box[i], box[x] = box[x], box[i] x =", "''' return len(self.xpath('.//stream/dictionary/entry/name[position()=1 and text()=\"Type\"]/../name[position()=2 and text()=\"ObjStm\"]/../../..')) def expandObjStm(self, ref):", "self.attrib['span'] for child in self.getchildren(): child.clear_span() span = property(_getspan,_setspan) def", "fallback = etree.ElementDefaultClassLookup(PDFXML) lookup = etree.ElementNamespaceClassLookup(fallback) namespace = lookup.get_namespace(None) #leafs", "* chr(pad) iv_bytes = randpool.RandomPool(512).get_bytes(block_size) encrypted_bytes = iv_bytes + AES.new(key_bytes,", ")\" print \"U\", encrypt_py['U'].encode('hex') print \"O\", encrypt_py['O'].encode('hex') def decrypt_xml(xml_element): n,g", "= pld[:-2] pld = decrypt(pld,real_key) e.value=pld #decrypt every string and", "<%s> stream.\"%str(self.get_numgen())) def isObjStm(self): ''' Return true if this is", "move all this to pdf_update and do the wrapper here", "pdf ''' pdf = self.xpath('/*')[0] return pdf.getIndirectObject(self.value) class PDFNumber(PDFXML): def", "for e in self.getchildren()]) def has_key(self,key): return len(self.xpath('./entry/name[position()=1 and text()=\"%s\"]'%key))>0", "these bytes to the # MD5 hash function, low-order byte", "xml = property(_to_xml) def _from_python(self, value): self.from_python(value) def _to_python(self): return", "in ['xref', 'stream'] and xref[0].tag == 'dictionary' return xref[0] def", "''.join([chr(ord(k)^i) for k in list(key)]) _buf1 = rc4crypt(_buf,_key) print \"RC4", "\"%d %d\"%key return self.remove(self.xpath('./indirect_object[@id=\"%s\"]'%key)[0]) return super(PDFUpdate,self).__delitem__(key) def __setitem__(self, key, val):", "^ box[(box[x] + box[y]) % 256])) return ''.join(out) block_size =", "''' Get the Trailer dictionary (of this update!)''' if startxref", "256])) return ''.join(out) block_size = 16 key_size = 32 def", "facility import logging,code logger = logging.getLogger(\"OPAFXML\") class PDFXML(etree.ElementBase): ''' Base", "for io in self.pdf_update.indirect_object: references = io.xpath(\".//R\") orig = \"%d", "to decrypt V:4 null password encryption ''' import hashlib, struct", "assert dictionary.has_key('Filter'), 'Stream not Filtered!' 
selected_filter = None selected_params =", "wrong type' if type(filters) != list: filters=[filters] params=params and [params]", "def getIndirectObject(self, ref): ''' Search for an indirect object '''", "parsed_objects class PDFArray(PDFXML): def to_python(self): return [e.value for e in", "property(_getid,_setid,None) def isStream(self): return len(self.xpath('./stream'))==1 class PDFPdf(PDFXML): def to_python(self): return", "encrypted_bytes def decrypt(encrypted_bytes,key_bytes): #assert len(key_bytes) == key_size mode = AES.MODE_CBC", "self]) if name in tags: return self.xpath('./%s'%name) return getattr(super(PDFXML,self),name) def", "PDFStream namespace['pdf'] = PDFPdf namespace['pdf_update'] = PDFUpdate namespace['indirect_object'] = PDFIndirect", "key): if tuple == type(key): key = \"%d %d\"%key return", "function. m.update (encrypt_py['O'][:32]) print \"MD5 update 2\", (encrypt_py['O'][:32]).encode('hex') #d) Convert", "(a) as input to this function. m = hashlib.md5() m.update((user_password+pad)[:32])", "decrypt_xml(e) for e in self.xpath('//string'): decrypt_xml(e) class PDFUpdate(PDFXML): def to_python(self):", "None, 'Value must be None' self.text = 'null' def to_python(self):", "tuple([e.value for e in self.getchildren()]) def _getkey(self): return self[0] def", "\"Got wrong leaf tag: %s\"%tag xml = self.parser.makeelement(tag) xml.value=value xml.span=attribs.setdefault('span',", "wrong leaf tag: %s\"%tag xml = self.parser.makeelement(tag) xml.value=value xml.span=attribs.setdefault('span', (0xffffffff,-1))", "nodes_added = set() for io in self.pdf_update.indirect_object: references = io.xpath(\".//R\")", "return self.xpath('./indirect_object[@id=\"%s\"]'%key)[0] return super(PDFUpdate,self).__getitem__(key) def __delitem__(self, key): if tuple ==", "''' Get the pdf Root node. 
''' return self.getIndirectObject(self.getTrailer()['Root'].value).object def", "print \"PASSWORD: \", user_password.encode('hex') print \"PAD: \", pad.encode('hex') #b) Initialize", "iv_bytes).encrypt(data) return encrypted_bytes def decrypt(encrypted_bytes,key_bytes): #assert len(key_bytes) == key_size mode", "stream is filtered ''' return self.dictionary.has_key('Filter') def getFilters(self): val =", "filters]), 'Filter shall be a names' assert all([type(x)==dict for x", "return u[ref] def getRoot(self): ''' Get the pdf Root node.", "return [io.id for io in self.xpath('.//stream/dictionary/entry/name[position()=1 and text()=\"Type\"]/../name[position()=2 and text()=\"ObjStm\"]/../../../..')]", "to pdf_update and do the wrapper here def getObjectAt(self, pos):", "encryption ''' import hashlib, struct from Crypto.Cipher import AES from", "PDFString namespace['number'] = PDFNumber namespace['null'] = PDFNull namespace['bool'] = PDFBool", "None: params = [{}]*len(filters) assert all([type(x)==str for x in filters]),", "elif tag in ['indirect_object','dictionary', 'entry', 'array', 'stream', 'xref', 'pdf', 'pdf_update']:", "for v,i in deletion_list: del v[i] dictionary['Length'].value = len(self.data.value) def", "self.tag.startswith('indirect'): return self.id else: return self.getparent().get_numgen() #leaf class PDFString(PDFXML): def", "return dict([e.value for e in self.xpath('./indirect_object')]) def has_key(self,key): key =", "if params == None: params = [{}]*len(filters) assert all([type(x)==str for", "i in range(0,20): _key = ''.join([chr(ord(k)^i) for k in list(key)])", "element ''' if self.tag.startswith('indirect'): return self.id else: return self.getparent().get_numgen() #leaf", "val): if str == type(key): self.xpath('./indirect_object[@obj=\"%s\"]'%key)[0][:]=[val] #mmm else: super(PDFDictionary,self).__setitem__(key,val) def", "self[0] = key key = property(_getkey,_setkey,None) def _getval(self): return self[1]", "logging.getLogger(\"OPAFXML\") class PDFXML(etree.ElementBase): ''' Base pdf-xml class. Every pdf token", "defilterAll(self): ''' Find all object streams and expand them. Each", "del attribs['span'] for attr_key, attr_val in attribs.items(): xml.set(attr_key, str(attr_val)) for", "self.xpath('./entry/name[position()=1 and text()=\"%s\"]/../*[position()=2]'%i)[0] return super(PDFDictionary,self).__getitem__(i) def __delitem__(self, i): if str", "MD5 hash function, low-order byte first. WTF!!?? print \"MD5 update", "child in childs: xml.append(child) return xml def __getattr__(self,tag, *args,**kwargs): if", "io.id = (pointers[positions[p]],0) parsed_objects.append(io) return parsed_objects class PDFArray(PDFXML): def to_python(self):", "PDFNumber(PDFXML): def from_python(self, value): assert type(value) in [int, float], 'Wrong", "etree.XMLParser() fallback = etree.ElementDefaultClassLookup(PDFXML) lookup = etree.ElementNamespaceClassLookup(fallback) namespace = lookup.get_namespace(None)", "the MD5 hash function. 
if encrypt_py.has_key('EncryptMetadata') and encrypt_py['EncryptMetadata'] == false:", "value of the P entry to a 32-bit unsigned binary", "return self.xpath('//*[starts-with(@span,\"%d~\")]'%pos)[0] def getTrailer(self, startxref=None): ''' Get the Trailer dictionary", "io.isStream() and io.object.isFiltered(): io.object.defilter() def decrypt(self): ''' This will try", "__getattr__(self,tag, *args,**kwargs): if tag in ['number','string','name','R','startxref','header','data','null','bool']: return lambda payload, **my_kwargs:", "val val = property(_getval,_setval,None) class PDFDictionary(PDFXML): def to_python(self): return dict([e.value", "namespace['dictionary'] = PDFDictionary namespace['stream'] = PDFStream namespace['pdf'] = PDFPdf namespace['pdf_update']", "in expanded_iobjects: io_objstm.addnext(new_io) self.remove(io_objstm) def findAllObjStm(self): ''' Search 'compressed' object", "'dictionary' self[0] = d dictionary = property(_getdictionary,_setdictionary,None) def _getdata(self): return", "= \"digraph {\\n\" nodes_added = set() for io in self.pdf_update.indirect_object:", "and text()=\"%s\"]/../*[position()=2]'%i)[0] return super(PDFDictionary,self).__getitem__(i) def __delitem__(self, i): if str ==", "expandObjStm(self): ''' This parses the ObjStm structure and replace it", "\"%d %d\"%r.value dotdata += '\\t\"%s\" -> \"%s\";\\n'%(orig, dest) nodes_added.add(orig) nodes_added.add(dest)", "val.get('Filter',None) params = val.get('DecodeParams',None) assert any([type(filters) == list and (type(params)", "deletion_list.append((dictionary, 'Filter')) #del dictionary['Filter'] if dictionary.has_key('DecodeParms') and \\ type(dictionary['DecodeParms']) ==", "self.getTrailer()['Encrypt'].solve().object print \"It's ENCRYPTED!\" encrypt_py = encrypt.value print encrypt_py #Ok", "''' Generate a .dot graph of the pdf ''' dotdata", "self.text = str(value) def to_python(self): x = self.text return float(int(float(x)))", "an indirect object ''' for u in self.pdf_update: if u.has_key(ref):", "Filters' return zip(filters,params) def popFilter(self): dictionary = self.dictionary assert dictionary.has_key('Filter'),", "is not needed. Using null pass '' )\" print \"U\",", "for startxref' self.text = str(value).encode('string_escape') def to_python(self): return int(self.text.decode('string_escape')) class", "be compressed at this point\" assert self.dictionary.has_key('N'), \"N is mandatory", "value): assert type(value) == bool, 'Value must be a boolean'", "u.expandObjStm(ref) def defilterAll(self): ''' Find all object streams and expand", "deletion_list.append((dictionary, 'DecodeParms')) #del dictionary['DecodeParms'] #FIX recode defilterData .. make it", "user_password.encode('hex') print \"PAD: \", pad.encode('hex') #b) Initialize the MD5 hash", "have a span wich indicates where the original token layed", "pad * chr(pad) iv_bytes = randpool.RandomPool(512).get_bytes(block_size) encrypted_bytes = iv_bytes +", "xml.value=value xml.span=attribs.setdefault('span', (0xffffffff,-1)) del attribs['span'] for attr_key, attr_val in attribs.items():", "'data':self[1].value} def _getdictionary(self): return self[0] def _setdictionary(self, d): assert key.tag", "it.\"%dot) file(dot,\"w\").write(dotdata) def expandAllObjStm(self): ''' Find all object streams and", "box[x], box[i] x = 0 y = 0 out =", "box[i] x = 0 y = 0 out = []", "as input to this function. 
m = hashlib.md5() m.update((user_password+pad)[:32]) print", "= (x + box[i] + ord(key[i % len(key)])) % 256", "[{}]*len(filters) assert all([type(x)==str for x in filters]), 'Filter shall be", "by its childs ''' for u in self.pdf_update: for io", "print \"N:\",n #a) Pad or truncate the password string to", "type(dictionary['Filter']) == PDFArray: selected_filter = dictionary['Filter'][0] del dictionary['Filter'][0] if dictionary.has_key('DecodeParms'):", "token layed in the file ''' def _getspan(self): return tuple([int(i)", "0xFFFFFFFF to the MD5 hash function. if encrypt_py.has_key('EncryptMetadata') and encrypt_py['EncryptMetadata']", "= \"%d %d\"%key return len(self.xpath('./indirect_object[@id=\"%s\"]'%key))>0 def __getitem__(self, key): if tuple", "namespace['header'] = PDFHeader namespace['startxref'] = PDFStartxref namespace['data'] = PDFData #trees", "__name__==\"__main__\": name = create_leaf('name', \"Name\") string = create_leaf('string', \"Felipe\") entry", "boolean' self.text = ['false','true'][int(value)] def to_python(self): return {'false': False, 'true':", "super(PDFUpdate,self).__getitem__(key) def __delitem__(self, key): if tuple == type(key): key =", "len(dictionary['DecodeParms']) == 0: deletion_list.append((dictionary, 'DecodeParms')) #del dictionary['DecodeParms'] #FIX recode defilterData", "expanded_iobjects: io_objstm.addnext(new_io) self.remove(io_objstm) def findAllObjStm(self): ''' Search 'compressed' object streams", "by its childs ''' for u in self.pdf_update: for ref", "O entry to the MD5 hash function. m.update (encrypt_py['O'][:32]) print", "in self.getchildren(): child.span_move(offset) def span_expand(self,span): begin,end = self.span self.span =", "from the trailer.. ID = '' m.update (ID) print \"MD5", "PDFR namespace['header'] = PDFHeader namespace['startxref'] = PDFStartxref namespace['data'] = PDFData", "to_python(self): assert self.text == 'null', 'PDFNull xml not initialized' return", "= lookup.get_namespace(None) #leafs namespace['name'] = PDFName namespace['string'] = PDFString namespace['number']", "xml representation will have a span wich indicates where the", "in childs: xml.append(child) return xml def __getattr__(self,tag, *args,**kwargs): if tag", "= create_leaf('string', \"Felipe\") entry = create_tree('entry',[name,string]) dictionary = create_tree('dictionary',[entry]) stream_data", "[int(x) for x in data[:first].split()] assert len(pointers)%2 == 0 ,", "filtered ''' return self.dictionary.has_key('Filter') def getFilters(self): val = self.dictionary.value filters", "+ ord(key[i % len(key)])) % 256 box[i], box[x] = box[x],", "to a 32-bit unsigned binary number and pass these bytes", "self.create_tree(tag, *payload, **my_kwargs) return super(PDFXMLFactory,self).__getattr__(tag,*args,**kwargs) PDF = PDFXMLFactory() def create_leaf(tag,", "Trailer dictionary (of this update!)''' if startxref == None: startxref", "object stream (ObjStml) ''' return self.dictionary.has_key('Type') and self.dictionary['Type'].value == 'ObjStm'", "tuple([int(i) for i in self.get('id').split(' ')]) def _setid(self, o): self.set('id'," ]
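The per-object key schedule that decrypt_xml applies is the spec's Algorithm 3.1: MD5 of the file key, the three low bytes of the object number, the two low bytes of the generation number, and the "sAlT" constant for AES-V2, truncated to min(keylen+5, 16) bytes. A standalone sketch (Python 3, hashlib only; the object number, generation and file key below are made-up values, not taken from the module above):

import hashlib

def object_key(file_key, n, g, aes=True):
    # Derive the per-object key: file key + low bytes of (n, g),
    # plus the b"sAlT" constant for AES-V2 streams
    m = hashlib.md5()
    m.update(file_key)
    m.update(bytes([n & 0xff, (n >> 8) & 0xff, (n >> 16) & 0xff]))
    m.update(bytes([g & 0xff, (g >> 8) & 0xff]))
    if aes:
        m.update(b"sAlT")
    # the spec caps the derived key at 16 bytes
    return m.digest()[:min(len(file_key) + 5, 16)]

print(object_key(b"\x01" * 16, n=12, g=0).hex())  # made-up inputs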
[ "= tf.Variable(tf.zeros([784, 10]), name=\"a\") b = tf.Variable(tf.zeros([10]), name=\"b\") y =", "convolutional(x, keep_prob): def conv2d(x, w): return tf.nn.conv2d(x, w, [1, 1,", "1, 1, 1], padding='SAME') def max_pool_2x2(x): return tf.nn.max_pool( x, ksize=[1,", "ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME') def", "tf.constant(0.1, shape=shape) return tf.Variable(initial) x_image = tf.reshape(x, [-1, 28, 28,", "w_fc2 = weight_variable([1024, 10]) b_fc2 = bias_variable([10]) y = tf.nn.softmax(tf.matmul(h_fc1_drop,", "as tf # y=ax+b linear model def regression(x): a =", "weight_variable([7 * 7 * 64, 1024]) b_fc1 = bias_variable([1024]) h_pool2_flat", "= tf.reshape(h_pool2, [-1, 7 * 7 * 64]) h_fc1 =", "b_fc2 = bias_variable([10]) y = tf.nn.softmax(tf.matmul(h_fc1_drop, w_fc2)) return y, [w_conv1,", "b_conv2 = bias_variable([64]) h_conv2 = tf.nn.relu(conv2d(h_pool1, w_conv2) + b_conv2) h_pool2", "y=ax+b linear model def regression(x): a = tf.Variable(tf.zeros([784, 10]), name=\"a\")", "h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, w_fc1) + b_fc1) h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)", "def regression(x): a = tf.Variable(tf.zeros([784, 10]), name=\"a\") b = tf.Variable(tf.zeros([10]),", "def bias_variable(shape): initial = tf.constant(0.1, shape=shape) return tf.Variable(initial) x_image =", "= tf.nn.softmax(tf.matmul(h_fc1_drop, w_fc2)) return y, [w_conv1, b_conv1, w_conv2, b_conv2, w_fc1,", "w_conv1) + b_conv1) h_pool1 = max_pool_2x2(h_conv1) w_conv2 = weight_variable([5, 5,", "name=\"b\") y = tf.nn.softmax(tf.matmul(x, a) + b) return y, [a,", "64, 1024]) b_fc1 = bias_variable([1024]) h_pool2_flat = tf.reshape(h_pool2, [-1, 7", "linear model def regression(x): a = tf.Variable(tf.zeros([784, 10]), name=\"a\") b", "= tf.nn.relu(conv2d(x_image, w_conv1) + b_conv1) h_pool1 = max_pool_2x2(h_conv1) w_conv2 =", "[-1, 7 * 7 * 64]) h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, w_fc1)", "padding='SAME') def max_pool_2x2(x): return tf.nn.max_pool( x, ksize=[1, 2, 2, 1],", "a = tf.Variable(tf.zeros([784, 10]), name=\"a\") b = tf.Variable(tf.zeros([10]), name=\"b\") y", "1], padding='SAME') def max_pool_2x2(x): return tf.nn.max_pool( x, ksize=[1, 2, 2,", "y, [a, b] # 定义卷积模型 def convolutional(x, keep_prob): def conv2d(x,", "[a, b] # 定义卷积模型 def convolutional(x, keep_prob): def conv2d(x, w):", "7 * 7 * 64]) h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, w_fc1) +", "= tf.constant(0.1, shape=shape) return tf.Variable(initial) x_image = tf.reshape(x, [-1, 28,", "w, [1, 1, 1, 1], padding='SAME') def max_pool_2x2(x): return tf.nn.max_pool(", "= tf.truncated_normal(shape, stddev=0.1) return tf.Variable(initial) def bias_variable(shape): initial = tf.constant(0.1,", "max_pool_2x2(h_conv2) # 全连接层 w_fc1 = weight_variable([7 * 7 * 64,", "x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')", "1], strides=[1, 2, 2, 1], padding='SAME') def weight_variable(shape): initial =", "name=\"a\") b = tf.Variable(tf.zeros([10]), name=\"b\") y = tf.nn.softmax(tf.matmul(x, a) +", "h_pool2 = max_pool_2x2(h_conv2) # 全连接层 w_fc1 = weight_variable([7 * 7", "64]) b_conv2 = bias_variable([64]) h_conv2 = tf.nn.relu(conv2d(h_pool1, w_conv2) + b_conv2)", "= max_pool_2x2(h_conv2) # 全连接层 w_fc1 = weight_variable([7 * 7 *", "import tensorflow as tf # y=ax+b linear model def regression(x):", "= tf.nn.relu(tf.matmul(h_pool2_flat, w_fc1) + b_fc1) h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob) w_fc2", "keep_prob): def conv2d(x, w): return tf.nn.conv2d(x, w, [1, 1, 1,", "2, 2, 1], padding='SAME') def weight_variable(shape): 
initial = tf.truncated_normal(shape, stddev=0.1)", "tf.nn.relu(conv2d(h_pool1, w_conv2) + b_conv2) h_pool2 = max_pool_2x2(h_conv2) # 全连接层 w_fc1", "initial = tf.constant(0.1, shape=shape) return tf.Variable(initial) x_image = tf.reshape(x, [-1,", "return tf.nn.max_pool( x, ksize=[1, 2, 2, 1], strides=[1, 2, 2,", "= weight_variable([5, 5, 32, 64]) b_conv2 = bias_variable([64]) h_conv2 =", "2, 1], strides=[1, 2, 2, 1], padding='SAME') def weight_variable(shape): initial", "h_conv2 = tf.nn.relu(conv2d(h_pool1, w_conv2) + b_conv2) h_pool2 = max_pool_2x2(h_conv2) #", "+ b) return y, [a, b] # 定义卷积模型 def convolutional(x,", "return y, [a, b] # 定义卷积模型 def convolutional(x, keep_prob): def", "return tf.Variable(initial) x_image = tf.reshape(x, [-1, 28, 28, 1]) w_conv1", "max_pool_2x2(x): return tf.nn.max_pool( x, ksize=[1, 2, 2, 1], strides=[1, 2,", "w_conv2) + b_conv2) h_pool2 = max_pool_2x2(h_conv2) # 全连接层 w_fc1 =", "# 全连接层 w_fc1 = weight_variable([7 * 7 * 64, 1024])", "2, 2, 1], strides=[1, 2, 2, 1], padding='SAME') def weight_variable(shape):", "= weight_variable([1024, 10]) b_fc2 = bias_variable([10]) y = tf.nn.softmax(tf.matmul(h_fc1_drop, w_fc2))", "1], padding='SAME') def weight_variable(shape): initial = tf.truncated_normal(shape, stddev=0.1) return tf.Variable(initial)", "bias_variable([32]) h_conv1 = tf.nn.relu(conv2d(x_image, w_conv1) + b_conv1) h_pool1 = max_pool_2x2(h_conv1)", "b_conv1 = bias_variable([32]) h_conv1 = tf.nn.relu(conv2d(x_image, w_conv1) + b_conv1) h_pool1", "* 64]) h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, w_fc1) + b_fc1) h_fc1_drop =", "shape=shape) return tf.Variable(initial) x_image = tf.reshape(x, [-1, 28, 28, 1])", "x_image = tf.reshape(x, [-1, 28, 28, 1]) w_conv1 = weight_variable([5,", "32]) b_conv1 = bias_variable([32]) h_conv1 = tf.nn.relu(conv2d(x_image, w_conv1) + b_conv1)", "+ b_conv1) h_pool1 = max_pool_2x2(h_conv1) w_conv2 = weight_variable([5, 5, 32,", "tf # y=ax+b linear model def regression(x): a = tf.Variable(tf.zeros([784,", "tf.reshape(x, [-1, 28, 28, 1]) w_conv1 = weight_variable([5, 5, 1,", "weight_variable(shape): initial = tf.truncated_normal(shape, stddev=0.1) return tf.Variable(initial) def bias_variable(shape): initial", "# 定义卷积模型 def convolutional(x, keep_prob): def conv2d(x, w): return tf.nn.conv2d(x,", "initial = tf.truncated_normal(shape, stddev=0.1) return tf.Variable(initial) def bias_variable(shape): initial =", "tf.Variable(initial) x_image = tf.reshape(x, [-1, 28, 28, 1]) w_conv1 =", "b_conv2) h_pool2 = max_pool_2x2(h_conv2) # 全连接层 w_fc1 = weight_variable([7 *", "10]) b_fc2 = bias_variable([10]) y = tf.nn.softmax(tf.matmul(h_fc1_drop, w_fc2)) return y,", "bias_variable(shape): initial = tf.constant(0.1, shape=shape) return tf.Variable(initial) x_image = tf.reshape(x,", "定义卷积模型 def convolutional(x, keep_prob): def conv2d(x, w): return tf.nn.conv2d(x, w,", "1024]) b_fc1 = bias_variable([1024]) h_pool2_flat = tf.reshape(h_pool2, [-1, 7 *", "7 * 64, 1024]) b_fc1 = bias_variable([1024]) h_pool2_flat = tf.reshape(h_pool2,", "= bias_variable([10]) y = tf.nn.softmax(tf.matmul(h_fc1_drop, w_fc2)) return y, [w_conv1, b_conv1,", "tf.nn.max_pool( x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],", "w): return tf.nn.conv2d(x, w, [1, 1, 1, 1], padding='SAME') def", "= weight_variable([5, 5, 1, 32]) b_conv1 = bias_variable([32]) h_conv1 =", "weight_variable([5, 5, 1, 32]) b_conv1 = bias_variable([32]) h_conv1 = tf.nn.relu(conv2d(x_image,", "* 64, 1024]) b_fc1 = bias_variable([1024]) h_pool2_flat = tf.reshape(h_pool2, [-1,", "[-1, 28, 28, 1]) w_conv1 = 
weight_variable([5, 5, 1, 32])", "= weight_variable([7 * 7 * 64, 1024]) b_fc1 = bias_variable([1024])", "5, 1, 32]) b_conv1 = bias_variable([32]) h_conv1 = tf.nn.relu(conv2d(x_image, w_conv1)", "max_pool_2x2(h_conv1) w_conv2 = weight_variable([5, 5, 32, 64]) b_conv2 = bias_variable([64])", "tf.nn.relu(conv2d(x_image, w_conv1) + b_conv1) h_pool1 = max_pool_2x2(h_conv1) w_conv2 = weight_variable([5,", "全连接层 w_fc1 = weight_variable([7 * 7 * 64, 1024]) b_fc1", "[1, 1, 1, 1], padding='SAME') def max_pool_2x2(x): return tf.nn.max_pool( x,", "= tf.nn.softmax(tf.matmul(x, a) + b) return y, [a, b] #", "conv2d(x, w): return tf.nn.conv2d(x, w, [1, 1, 1, 1], padding='SAME')", "tensorflow as tf # y=ax+b linear model def regression(x): a", "# y=ax+b linear model def regression(x): a = tf.Variable(tf.zeros([784, 10]),", "1, 32]) b_conv1 = bias_variable([32]) h_conv1 = tf.nn.relu(conv2d(x_image, w_conv1) +", "w_conv2 = weight_variable([5, 5, 32, 64]) b_conv2 = bias_variable([64]) h_conv2", "def weight_variable(shape): initial = tf.truncated_normal(shape, stddev=0.1) return tf.Variable(initial) def bias_variable(shape):", "= bias_variable([64]) h_conv2 = tf.nn.relu(conv2d(h_pool1, w_conv2) + b_conv2) h_pool2 =", "tf.truncated_normal(shape, stddev=0.1) return tf.Variable(initial) def bias_variable(shape): initial = tf.constant(0.1, shape=shape)", "= max_pool_2x2(h_conv1) w_conv2 = weight_variable([5, 5, 32, 64]) b_conv2 =", "bias_variable([10]) y = tf.nn.softmax(tf.matmul(h_fc1_drop, w_fc2)) return y, [w_conv1, b_conv1, w_conv2,", "1, 1], padding='SAME') def max_pool_2x2(x): return tf.nn.max_pool( x, ksize=[1, 2,", "tf.nn.dropout(h_fc1, keep_prob) w_fc2 = weight_variable([1024, 10]) b_fc2 = bias_variable([10]) y", "b_conv1) h_pool1 = max_pool_2x2(h_conv1) w_conv2 = weight_variable([5, 5, 32, 64])", "28, 1]) w_conv1 = weight_variable([5, 5, 1, 32]) b_conv1 =", "weight_variable([1024, 10]) b_fc2 = bias_variable([10]) y = tf.nn.softmax(tf.matmul(h_fc1_drop, w_fc2)) return", "tf.nn.softmax(tf.matmul(h_fc1_drop, w_fc2)) return y, [w_conv1, b_conv1, w_conv2, b_conv2, w_fc1, w_fc2,", "= tf.Variable(tf.zeros([10]), name=\"b\") y = tf.nn.softmax(tf.matmul(x, a) + b) return", "return tf.nn.conv2d(x, w, [1, 1, 1, 1], padding='SAME') def max_pool_2x2(x):", "w_fc1) + b_fc1) h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob) w_fc2 = weight_variable([1024,", "2, 1], padding='SAME') def weight_variable(shape): initial = tf.truncated_normal(shape, stddev=0.1) return", "tf.Variable(tf.zeros([10]), name=\"b\") y = tf.nn.softmax(tf.matmul(x, a) + b) return y,", "tf.nn.softmax(tf.matmul(x, a) + b) return y, [a, b] # 定义卷积模型", "tf.nn.conv2d(x, w, [1, 1, 1, 1], padding='SAME') def max_pool_2x2(x): return", "28, 28, 1]) w_conv1 = weight_variable([5, 5, 1, 32]) b_conv1", "tf.Variable(initial) def bias_variable(shape): initial = tf.constant(0.1, shape=shape) return tf.Variable(initial) x_image", "tf.Variable(tf.zeros([784, 10]), name=\"a\") b = tf.Variable(tf.zeros([10]), name=\"b\") y = tf.nn.softmax(tf.matmul(x,", "= tf.nn.dropout(h_fc1, keep_prob) w_fc2 = weight_variable([1024, 10]) b_fc2 = bias_variable([10])", "5, 32, 64]) b_conv2 = bias_variable([64]) h_conv2 = tf.nn.relu(conv2d(h_pool1, w_conv2)", "b_fc1 = bias_variable([1024]) h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7", "tf.reshape(h_pool2, [-1, 7 * 7 * 64]) h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat,", "tf.nn.relu(tf.matmul(h_pool2_flat, w_fc1) + b_fc1) h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob) w_fc2 =", "10]), name=\"a\") b = tf.Variable(tf.zeros([10]), name=\"b\") y = 
tf.nn.softmax(tf.matmul(x, a)", "model def regression(x): a = tf.Variable(tf.zeros([784, 10]), name=\"a\") b =", "h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob) w_fc2 = weight_variable([1024, 10]) b_fc2 =", "keep_prob) w_fc2 = weight_variable([1024, 10]) b_fc2 = bias_variable([10]) y =", "h_pool1 = max_pool_2x2(h_conv1) w_conv2 = weight_variable([5, 5, 32, 64]) b_conv2", "bias_variable([1024]) h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])", "y = tf.nn.softmax(tf.matmul(x, a) + b) return y, [a, b]", "def max_pool_2x2(x): return tf.nn.max_pool( x, ksize=[1, 2, 2, 1], strides=[1,", "* 7 * 64, 1024]) b_fc1 = bias_variable([1024]) h_pool2_flat =", "64]) h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, w_fc1) + b_fc1) h_fc1_drop = tf.nn.dropout(h_fc1,", "b_fc1) h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob) w_fc2 = weight_variable([1024, 10]) b_fc2", "def convolutional(x, keep_prob): def conv2d(x, w): return tf.nn.conv2d(x, w, [1,", "padding='SAME') def weight_variable(shape): initial = tf.truncated_normal(shape, stddev=0.1) return tf.Variable(initial) def", "bias_variable([64]) h_conv2 = tf.nn.relu(conv2d(h_pool1, w_conv2) + b_conv2) h_pool2 = max_pool_2x2(h_conv2)", "w_conv1 = weight_variable([5, 5, 1, 32]) b_conv1 = bias_variable([32]) h_conv1", "y = tf.nn.softmax(tf.matmul(h_fc1_drop, w_fc2)) return y, [w_conv1, b_conv1, w_conv2, b_conv2,", "def conv2d(x, w): return tf.nn.conv2d(x, w, [1, 1, 1, 1],", "32, 64]) b_conv2 = bias_variable([64]) h_conv2 = tf.nn.relu(conv2d(h_pool1, w_conv2) +", "return tf.Variable(initial) def bias_variable(shape): initial = tf.constant(0.1, shape=shape) return tf.Variable(initial)", "<filename>course-code/imooc-tf-mnist-flask/mnist/module.py import tensorflow as tf # y=ax+b linear model def", "= bias_variable([1024]) h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 *", "b) return y, [a, b] # 定义卷积模型 def convolutional(x, keep_prob):", "h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64]) h_fc1", "1]) w_conv1 = weight_variable([5, 5, 1, 32]) b_conv1 = bias_variable([32])", "w_fc2)) return y, [w_conv1, b_conv1, w_conv2, b_conv2, w_fc1, w_fc2, b_fc2]", "b = tf.Variable(tf.zeros([10]), name=\"b\") y = tf.nn.softmax(tf.matmul(x, a) + b)", "a) + b) return y, [a, b] # 定义卷积模型 def", "* 7 * 64]) h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, w_fc1) + b_fc1)", "7 * 64]) h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, w_fc1) + b_fc1) h_fc1_drop", "+ b_conv2) h_pool2 = max_pool_2x2(h_conv2) # 全连接层 w_fc1 = weight_variable([7", "= bias_variable([32]) h_conv1 = tf.nn.relu(conv2d(x_image, w_conv1) + b_conv1) h_pool1 =", "stddev=0.1) return tf.Variable(initial) def bias_variable(shape): initial = tf.constant(0.1, shape=shape) return", "w_fc1 = weight_variable([7 * 7 * 64, 1024]) b_fc1 =", "+ b_fc1) h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob) w_fc2 = weight_variable([1024, 10])", "regression(x): a = tf.Variable(tf.zeros([784, 10]), name=\"a\") b = tf.Variable(tf.zeros([10]), name=\"b\")", "h_conv1 = tf.nn.relu(conv2d(x_image, w_conv1) + b_conv1) h_pool1 = max_pool_2x2(h_conv1) w_conv2", "= tf.nn.relu(conv2d(h_pool1, w_conv2) + b_conv2) h_pool2 = max_pool_2x2(h_conv2) # 全连接层", "b] # 定义卷积模型 def convolutional(x, keep_prob): def conv2d(x, w): return", "weight_variable([5, 5, 32, 64]) b_conv2 = bias_variable([64]) h_conv2 = tf.nn.relu(conv2d(h_pool1,", "= tf.reshape(x, [-1, 28, 28, 1]) w_conv1 = weight_variable([5, 5,", "strides=[1, 2, 2, 1], padding='SAME') def weight_variable(shape): initial = tf.truncated_normal(shape," ]
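For context, a minimal sketch of how these two graph builders are typically consumed with the standard TF 1.x API; the import path and checkpoint file are assumptions, not taken from this module:

import tensorflow as tf
from mnist import module  # assumed import path for the file above

x = tf.placeholder(tf.float32, [None, 784])
keep_prob = tf.placeholder(tf.float32)
y, variables = module.convolutional(x, keep_prob)

# Saver restricted to the variables the model returns
saver = tf.train.Saver(variables)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # saver.restore(sess, "mnist/data/convolutional.ckpt")  # hypothetical checkpoint
    # probs = sess.run(y, feed_dict={x: images, keep_prob: 1.0})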
[ "handle_nft_mint(exporter, txinfo): transfers_in, transfers_out, transfers_unknown = txinfo.transfers_net if len(transfers_in) ==", "= txinfo.transfers_net if len(transfers_in) == 1 and len(transfers_out) == 1:", "log_instructions and len(transfers_out) == 1 and len(transfers_in) == 0: return", "handle_metaplex(exporter, txinfo): transfers_in, transfers_out, _ = txinfo.transfers_net if len(transfers_in) ==", "= transfers_out[0] received_amount, received_currency, _, _ = transfers_in[0] row =", "and len(transfers_out) == 1: sent_amount, sent_currency, _, _ = transfers_out[0]", "= txinfo.log_instructions transfers_in, transfers_out, _ = txinfo.transfers_net if \"MintTo\" in", "== 1 and len(transfers_in) == 1 and transfers_in[0][0] == 1):", "row = make_swap_tx(txinfo, sent_amount, sent_currency, received_amount, received_currency) exporter.ingest_row(row) return handle_unknown_detect_transfers(exporter,", "log_instructions and len(transfers_out) == 1 and len(transfers_in) == 1 and", "log_instructions = txinfo.log_instructions transfers_in, transfers_out, _ = txinfo.transfers_net if \"MintTo\"", "sent_amount, sent_currency, received_amount, received_currency) exporter.ingest_row(row) else: handle_unknown_detect_transfers(exporter, txinfo) def is_nft_mint(txinfo):", "transfers_out, transfers_unknown = txinfo.transfers_net if len(transfers_in) == 1 and len(transfers_out)", "sent_currency, _, _ = transfers_out[0] received_amount, received_currency, _, _ =", "1 and len(transfers_in) == 0: return True elif (\"MintTo\" in", "def is_nft_mint(txinfo): log_instructions = txinfo.log_instructions transfers_in, transfers_out, _ = txinfo.transfers_net", "txinfo.transfers_net if \"MintTo\" in log_instructions and len(transfers_out) == 1 and", "_ = txinfo.transfers_net if len(transfers_in) == 1 and len(transfers_out) ==", "0: return True elif (\"MintTo\" in log_instructions and len(transfers_out) ==", "and len(transfers_in) == 0: return True elif (\"MintTo\" in log_instructions", "received_amount, received_currency, _, _ = transfers_in[0] row = make_swap_tx(txinfo, sent_amount,", "1): return True else: return False def handle_nft_mint(exporter, txinfo): transfers_in,", "= make_swap_tx(txinfo, sent_amount, sent_currency, received_amount, received_currency) exporter.ingest_row(row) return handle_unknown_detect_transfers(exporter, txinfo)", "= transfers_in[0] row = make_swap_tx(txinfo, sent_amount, sent_currency, received_amount, received_currency) exporter.ingest_row(row)", "len(transfers_in) == 1 and transfers_in[0][0] == 1): return True else:", "_ = transfers_in[0] row = make_swap_tx(txinfo, sent_amount, sent_currency, received_amount, received_currency)", "return True else: return False def handle_nft_mint(exporter, txinfo): transfers_in, transfers_out,", "1 and len(transfers_in) == 1 and transfers_in[0][0] == 1): return", "sent_amount, sent_currency, _, _ = transfers_out[0] received_amount, received_currency, _, _", "common.make_tx import make_swap_tx from sol.handle_simple import handle_unknown_detect_transfers def handle_metaplex(exporter, txinfo):", "transfers_in[0] row = make_swap_tx(txinfo, sent_amount, sent_currency, received_amount, received_currency) exporter.ingest_row(row) return", "== 1 and len(transfers_in) == 0: return True elif (\"MintTo\"", "import handle_unknown_detect_transfers def handle_metaplex(exporter, txinfo): transfers_in, transfers_out, _ = txinfo.transfers_net", "== 1): return True else: return False def handle_nft_mint(exporter, txinfo):", "1 and transfers_in[0][0] == 1): 
return True else: return False", "def handle_nft_mint(exporter, txinfo): transfers_in, transfers_out, transfers_unknown = txinfo.transfers_net if len(transfers_in)", "\"MintTo\" in log_instructions and len(transfers_out) == 1 and len(transfers_in) ==", "1 and len(transfers_out) == 1: sent_amount, sent_currency, _, _ =", "else: handle_unknown_detect_transfers(exporter, txinfo) def is_nft_mint(txinfo): log_instructions = txinfo.log_instructions transfers_in, transfers_out,", "txinfo.log_instructions transfers_in, transfers_out, _ = txinfo.transfers_net if \"MintTo\" in log_instructions", "transfers_in[0][0] == 1): return True else: return False def handle_nft_mint(exporter,", "True else: return False def handle_nft_mint(exporter, txinfo): transfers_in, transfers_out, transfers_unknown", "import make_swap_tx from sol.handle_simple import handle_unknown_detect_transfers def handle_metaplex(exporter, txinfo): transfers_in,", "== 1: sent_amount, sent_currency, _, _ = transfers_out[0] received_amount, received_currency,", "in log_instructions and len(transfers_out) == 1 and len(transfers_in) == 0:", "_, _ = transfers_out[0] received_amount, received_currency, _, _ = transfers_in[0]", "received_currency) exporter.ingest_row(row) else: handle_unknown_detect_transfers(exporter, txinfo) def is_nft_mint(txinfo): log_instructions = txinfo.log_instructions", "handle_unknown_detect_transfers def handle_metaplex(exporter, txinfo): transfers_in, transfers_out, _ = txinfo.transfers_net if", "make_swap_tx from sol.handle_simple import handle_unknown_detect_transfers def handle_metaplex(exporter, txinfo): transfers_in, transfers_out,", "received_amount, received_currency) exporter.ingest_row(row) else: handle_unknown_detect_transfers(exporter, txinfo) def is_nft_mint(txinfo): log_instructions =", "== 1 and transfers_in[0][0] == 1): return True else: return", "len(transfers_in) == 0: return True elif (\"MintTo\" in log_instructions and", "(\"MintTo\" in log_instructions and len(transfers_out) == 1 and len(transfers_in) ==", "make_swap_tx(txinfo, sent_amount, sent_currency, received_amount, received_currency) exporter.ingest_row(row) else: handle_unknown_detect_transfers(exporter, txinfo) def", "txinfo): transfers_in, transfers_out, transfers_unknown = txinfo.transfers_net if len(transfers_in) == 1", "sent_currency, received_amount, received_currency) exporter.ingest_row(row) else: handle_unknown_detect_transfers(exporter, txinfo) def is_nft_mint(txinfo): log_instructions", "= txinfo.transfers_net if \"MintTo\" in log_instructions and len(transfers_out) == 1", "handle_unknown_detect_transfers(exporter, txinfo) def is_nft_mint(txinfo): log_instructions = txinfo.log_instructions transfers_in, transfers_out, _", "txinfo) def is_nft_mint(txinfo): log_instructions = txinfo.log_instructions transfers_in, transfers_out, _ =", "received_currency, _, _ = transfers_in[0] row = make_swap_tx(txinfo, sent_amount, sent_currency,", "== 1 and len(transfers_out) == 1: sent_amount, sent_currency, _, _", "transfers_out[0] received_amount, received_currency, _, _ = transfers_in[0] row = make_swap_tx(txinfo,", "transfers_in, transfers_out, _ = txinfo.transfers_net if len(transfers_in) == 1 and", "sol.handle_simple import handle_unknown_detect_transfers def handle_metaplex(exporter, txinfo): transfers_in, transfers_out, _ =", "transfers_out, _ = txinfo.transfers_net if len(transfers_in) == 1 and len(transfers_out)", "from common.make_tx import make_swap_tx from sol.handle_simple import handle_unknown_detect_transfers def 
handle_metaplex(exporter,", "exporter.ingest_row(row) else: handle_unknown_detect_transfers(exporter, txinfo) def is_nft_mint(txinfo): log_instructions = txinfo.log_instructions transfers_in,", "elif (\"MintTo\" in log_instructions and len(transfers_out) == 1 and len(transfers_in)", "transfers_in, transfers_out, _ = txinfo.transfers_net if \"MintTo\" in log_instructions and", "from sol.handle_simple import handle_unknown_detect_transfers def handle_metaplex(exporter, txinfo): transfers_in, transfers_out, _", "return True elif (\"MintTo\" in log_instructions and len(transfers_out) == 1", "_ = transfers_out[0] received_amount, received_currency, _, _ = transfers_in[0] row", "transfers_in, transfers_out, transfers_unknown = txinfo.transfers_net if len(transfers_in) == 1 and", "1: sent_amount, sent_currency, _, _ = transfers_out[0] received_amount, received_currency, _,", "= make_swap_tx(txinfo, sent_amount, sent_currency, received_amount, received_currency) exporter.ingest_row(row) else: handle_unknown_detect_transfers(exporter, txinfo)", "if len(transfers_in) == 1 and len(transfers_out) == 1: sent_amount, sent_currency,", "and len(transfers_in) == 1 and transfers_in[0][0] == 1): return True", "len(transfers_out) == 1 and len(transfers_in) == 1 and transfers_in[0][0] ==", "txinfo): transfers_in, transfers_out, _ = txinfo.transfers_net if len(transfers_in) == 1", "if \"MintTo\" in log_instructions and len(transfers_out) == 1 and len(transfers_in)", "True elif (\"MintTo\" in log_instructions and len(transfers_out) == 1 and", "in log_instructions and len(transfers_out) == 1 and len(transfers_in) == 1", "txinfo.transfers_net if len(transfers_in) == 1 and len(transfers_out) == 1: sent_amount,", "return False def handle_nft_mint(exporter, txinfo): transfers_in, transfers_out, transfers_unknown = txinfo.transfers_net", "transfers_in[0] row = make_swap_tx(txinfo, sent_amount, sent_currency, received_amount, received_currency) exporter.ingest_row(row) else:", "and len(transfers_out) == 1 and len(transfers_in) == 0: return True", "len(transfers_out) == 1 and len(transfers_in) == 0: return True elif", "transfers_out, _ = txinfo.transfers_net if \"MintTo\" in log_instructions and len(transfers_out)", "_, _ = transfers_in[0] row = make_swap_tx(txinfo, sent_amount, sent_currency, received_amount,", "_ = txinfo.transfers_net if \"MintTo\" in log_instructions and len(transfers_out) ==", "and len(transfers_out) == 1 and len(transfers_in) == 1 and transfers_in[0][0]", "is_nft_mint(txinfo): log_instructions = txinfo.log_instructions transfers_in, transfers_out, _ = txinfo.transfers_net if", "transfers_unknown = txinfo.transfers_net if len(transfers_in) == 1 and len(transfers_out) ==", "== 0: return True elif (\"MintTo\" in log_instructions and len(transfers_out)", "len(transfers_in) == 1 and len(transfers_out) == 1: sent_amount, sent_currency, _,", "False def handle_nft_mint(exporter, txinfo): transfers_in, transfers_out, transfers_unknown = txinfo.transfers_net if", "def handle_metaplex(exporter, txinfo): transfers_in, transfers_out, _ = txinfo.transfers_net if len(transfers_in)", "and transfers_in[0][0] == 1): return True else: return False def", "row = make_swap_tx(txinfo, sent_amount, sent_currency, received_amount, received_currency) exporter.ingest_row(row) else: handle_unknown_detect_transfers(exporter,", "len(transfers_out) == 1: sent_amount, sent_currency, _, _ = transfers_out[0] received_amount,", "else: return False def handle_nft_mint(exporter, txinfo): transfers_in, transfers_out, transfers_unknown 
=" ]
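A sketch of how the mint detector and handler fit together, assuming the module above is importable. TxInfo and Exporter below are hypothetical stand-ins (the real project supplies its own classes); transfers are modeled as (amount, currency, source, destination) tuples, which is all the branch logic inspects:

class TxInfo:       # hypothetical stand-in
    def __init__(self, transfers_net, log_instructions=()):
        self.transfers_net = transfers_net          # (in, out, unknown)
        self.log_instructions = log_instructions

class Exporter:     # hypothetical stand-in
    def ingest_row(self, row):
        print(row)

# One NFT minted in, one SOL payment out -> recorded as a swap row
txinfo = TxInfo(
    transfers_net=([(1, "MYNFT", None, None)], [(2.5, "SOL", None, None)], []),
    log_instructions=["MintTo"],
)
if is_nft_mint(txinfo):
    handle_nft_mint(Exporter(), txinfo)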
[ "u_y = _dcor_internals._distance_matrix_generic( y, centering=_dcor_internals.double_centered, exponent=exponent) # Use the dcov", "proj(u_x) p_yz = proj(u_y) # Use the pdcor statistic def", "0, 1], ... [0, 1, 1, 1], ... [1, 1,", "for two random vectors. The test is a permutation test", "dcor.independence.distance_covariance_test(a, a) HypothesisTest(p_value=1.0, statistic=208.0) >>> dcor.independence.distance_covariance_test(a, b) ... # doctest:", "Use the pdcor statistic def statistic_function(distance_matrix): return u_x.shape[0] * _dcor_internals.u_product(", "exponent=exponent) u_y = _dcor_internals._distance_matrix_generic( y, centering=_dcor_internals.double_centered, exponent=exponent) # Use the", "statistic=2.2533380...e-30) >>> dcor.independence.partial_distance_covariance_test(a, b, c, ... num_resamples=5, random_state=0) HypothesisTest(p_value=0.1666666..., statistic=7.2690070...e-15)", "Also -------- distance_correlation_t_statistic Examples -------- >>> import numpy as np", "1, 1], ... [1, 1, 0, 1]]) >>> c =", "return _hypothesis._permutation_test_with_sym_matrix( u_x, statistic_function=statistic_function, num_resamples=num_resamples, random_state=random_state, n_jobs=n_jobs) def partial_distance_covariance_test( x,", "dcor >>> a = np.array([[1, 2, 3, 4], ... [5,", ">>> dcor.independence.partial_distance_covariance_test(a, b, c, ... num_resamples=5, random_state=0) HypothesisTest(p_value=0.1666666..., statistic=7.2690070...e-15) >>>", "distance covariance, for two random vectors. The test is a", "u_y) return _hypothesis._permutation_test_with_sym_matrix( u_x, statistic_function=statistic_function, num_resamples=num_resamples, random_state=random_state, n_jobs=n_jobs) def partial_distance_covariance_test(", "partial_distance_covariance Examples -------- >>> import numpy as np >>> import", "v = n * (n - 3) / 2 return", "T statistic. See Also -------- distance_correlation_t_test Examples -------- >>> import", "t distribution. The null hypothesis is that the two random", "def statistic_function(distance_matrix): return u_x.shape[0] * _dcor_internals.u_product( distance_matrix, p_yz) return _hypothesis._permutation_test_with_sym_matrix(", "... # doctest: +ELLIPSIS -0.4430164... >>> with np.errstate(divide='ignore'): ... dcor.independence.distance_correlation_t_statistic(b,", "... # doctest: +ELLIPSIS HypothesisTest(p_value=1.0, statistic=7.2690070...e-15) >>> dcor.independence.partial_distance_covariance_test(b, b, c)", "Observed random vector. The columns correspond with the individual random", "n * (n - 3) / 2 return np.sqrt(v -", "np.errstate(divide='ignore'): ... dcor.independence.distance_correlation_t_test(b, b) ... # doctest: +ELLIPSIS HypothesisTest(p_value=0.0, statistic=inf)", "dcor.independence.distance_covariance_test(a, b, ... num_resamples=5, random_state=13) HypothesisTest(p_value=0.3333333..., statistic=11.7532305...) >>> dcor.independence.distance_covariance_test(a, a,", "distance_covariance_test( x, y, *, num_resamples=0, exponent=1, random_state=None, n_jobs=1, ): \"\"\"", "vector. y: array_like Second random vector. 
The columns correspond with", "_dcor_internals.u_product( distance_matrix, p_yz) return _hypothesis._permutation_test_with_sym_matrix( p_xz, statistic_function=statistic_function, num_resamples=num_resamples, random_state=random_state, n_jobs=n_jobs)", "See Also -------- distance_correlation_t_statistic Examples -------- >>> import numpy as", "= _dcor_internals._distance_matrix_generic( y, centering=_dcor_internals.double_centered, exponent=exponent) # Use the dcov statistic", "_dcor_internals._distance_matrix_generic( y, centering=_dcor_internals.double_centered, exponent=exponent) # Use the dcov statistic def", "dcor.independence.distance_correlation_t_statistic(a, b) ... # doctest: +ELLIPSIS -0.4430164... >>> with np.errstate(divide='ignore'):", "y = _transform_to_2d(y) _dcor_internals._check_same_n_elements(x, y) random_state = _random_state_init(random_state) # Compute", "import numpy as np >>> import dcor >>> a =", "Results of the hypothesis test. See Also -------- distance_correlation_t_statistic Examples", "1, 1], ... [1, 1, 0, 1]]) >>> dcor.independence.distance_covariance_test(a, a)", "of the bias corrected version of distance correlation used in", "*, num_resamples=0, exponent=1, random_state=None, n_jobs=1, ): \"\"\" Test of distance", "of the Euclidean distance, in the range :math:`(0, 2)`. Equivalently,", "1, 1], ... [1, 1, 0, 1]]) >>> with np.errstate(divide='ignore'):", "\"\"\" import numpy as np import scipy.stats from . import", "The functions in this module provide methods for testing if", "of the random vector. num_resamples: int Number of permutations resamples", "bcdcor / np.sqrt(1 - bcdcor**2) def distance_correlation_t_test(x, y): \"\"\" Test", "6, 7, 8], ... [9, 10, 11, 12], ... [13,", "is twice the Hurst parameter of fractional Brownian motion. num_resamples:", "columns correspond with the individual random variables while the rows", "dcor.independence.distance_covariance_test(a, b, ... num_resamples=5, random_state=0) HypothesisTest(p_value=0.5, statistic=11.7532305...) >>> dcor.independence.distance_covariance_test(a, b,", "array_like Observed random vector. The columns correspond with the individual", "exponent=exponent) u_z = _dcor_internals._u_distance_matrix(z, exponent=exponent) # Compute projections proj =", "statistic=7.2690070...e-15) >>> dcor.independence.partial_distance_covariance_test(b, b, c) ... # doctest: +ELLIPSIS HypothesisTest(p_value=1.0,", "the partial distance covariance, for two random vectors conditioned on", "2)`. Equivalently, it is twice the Hurst parameter of fractional", "random_state = _random_state_init(random_state) # Compute U-centered matrices u_x = _dcor_internals._u_distance_matrix(x,", "_transform_to_2d(y) _dcor_internals._check_same_n_elements(x, y) random_state = _random_state_init(random_state) # Compute U-centered matrices", "... [1, 1, 0, 1]]) >>> with np.errstate(divide='ignore'): ... dcor.independence.distance_correlation_t_test(a,", "exponent=1, random_state=None, n_jobs=1, ): \"\"\" Test of partial distance covariance", "instances of the random vector. Returns ------- HypothesisTest Results of", "random vector. z: array_like Observed random vector. The columns correspond", "a = np.array([[1, 2, 3, 4], ... [5, 6, 7,", "/ np.sqrt(1 - bcdcor**2) def distance_correlation_t_test(x, y): \"\"\" Test of", "0, 1]]) >>> with np.errstate(divide='ignore'): ... dcor.independence.distance_correlation_t_test(a, a) ... 
#", "proj(u_y) # Use the pdcor statistic def statistic_function(distance_matrix): return u_x.shape[0]", "statistic=142.6664416...) >>> dcor.independence.partial_distance_covariance_test(a, b, c) ... # doctest: +ELLIPSIS HypothesisTest(p_value=1.0,", ">>> with np.errstate(divide='ignore'): ... dcor.independence.distance_correlation_t_test(a, a) ... # doctest: +ELLIPSIS", "inf \"\"\" bcdcor = u_distance_correlation_sqr(x, y) n = x.shape[0] v", "conditioned on a third. The test is a permutation test", "of the hypothesis test. See Also -------- partial_distance_covariance Examples --------", "scipy.stats from . import _dcor_internals, _hypothesis from ._dcor import u_distance_correlation_sqr", "covariance independence. Compute the test of independence based on the", "random_state=13) HypothesisTest(p_value=0.1666666..., statistic=7.2690070...e-15) >>> dcor.independence.partial_distance_covariance_test(a, c, b, ... num_resamples=7, random_state=0)", "def statistic_function(distance_matrix): return u_x.shape[0] * _dcor_internals.mean_product( distance_matrix, u_y) return _hypothesis._permutation_test_with_sym_matrix(", "1, 0, 1]]) >>> dcor.independence.distance_covariance_test(a, a) HypothesisTest(p_value=1.0, statistic=208.0) >>> dcor.independence.distance_covariance_test(a,", "_dcor_internals.mean_product( distance_matrix, u_y) return _hypothesis._permutation_test_with_sym_matrix( u_x, statistic_function=statistic_function, num_resamples=num_resamples, random_state=random_state, n_jobs=n_jobs)", "-0.4430164... >>> with np.errstate(divide='ignore'): ... dcor.independence.distance_correlation_t_statistic(b, b) inf \"\"\" bcdcor", "instances of the random vector. num_resamples: int Number of permutations", "a Student t distribution. The null hypothesis is that the", "range :math:`(0, 2)`. Equivalently, it is twice the Hurst parameter", "c) ... # doctest: +ELLIPSIS HypothesisTest(p_value=1.0, statistic=7.2690070...e-15) >>> dcor.independence.partial_distance_covariance_test(b, b,", "11, 12], ... [13, 14, 15, 16]]) >>> b =", "... [13, 14, 15, 16]]) >>> b = np.array([[1, 0,", "... dcor.independence.distance_correlation_t_test(a, a) ... # doctest: +ELLIPSIS HypothesisTest(p_value=0.0, statistic=inf) >>>", "inf >>> dcor.independence.distance_correlation_t_statistic(a, b) ... # doctest: +ELLIPSIS -0.4430164... >>>", ">>> dcor.independence.partial_distance_covariance_test(a, b, c) ... # doctest: +ELLIPSIS HypothesisTest(p_value=1.0, statistic=7.2690070...e-15)", "HypothesisTest Results of the hypothesis test. See Also -------- distance_correlation_t_statistic", "for two random vectors conditioned on a third. The test", "See Also -------- partial_distance_covariance Examples -------- >>> import numpy as", "+ELLIPSIS HypothesisTest(p_value=1.0, statistic=7.2690070...e-15) >>> dcor.independence.partial_distance_covariance_test(b, b, c) ... # doctest:", "_transform_to_2d(x) y = _transform_to_2d(y) _dcor_internals._check_same_n_elements(x, y) random_state = _random_state_init(random_state) #", "individual random variables while the rows are individual instances of", "2 df = v - 1 p_value = 1 -", "... # doctest: +ELLIPSIS HypothesisTest(p_value=0.0, statistic=inf) >>> dcor.independence.distance_correlation_t_test(a, b) ...", "doctest: +ELLIPSIS HypothesisTest(p_value=0.0, statistic=inf) >>> dcor.independence.distance_correlation_t_test(a, b) ... # doctest:", ">>> c = np.array([[1000, 0, 0, 1000], ... [0, 1000,", "Brownian motion. 
num_resamples: int Number of permutations resamples to take", "._utils import _random_state_init, _transform_to_2d def distance_covariance_test( x, y, *, num_resamples=0,", "independence. Compute the test of independence based on the partial", "the random vector. z: array_like Observed random vector. The columns", "doctest: +ELLIPSIS HypothesisTest(p_value=1.0, statistic=2.2533380...e-30) >>> dcor.independence.partial_distance_covariance_test(a, b, c, ... num_resamples=5,", "motion. num_resamples: int Number of permutations resamples to take in", "# doctest: +ELLIPSIS HypothesisTest(p_value=1.0, statistic=142.6664416...) >>> dcor.independence.partial_distance_covariance_test(a, b, c) ...", "(n - 3) / 2 df = v - 1", "10, 11, 12], ... [13, 14, 15, 16]]) >>> b", "to generate the permutations. Returns ------- HypothesisTest Results of the", "one. Parameters ---------- x: array_like First random vector. The columns", "(n - 3) / 2 return np.sqrt(v - 1) *", "are individual instances of the random vector. exponent: float Exponent", "random_state=0) HypothesisTest(p_value=1.0, statistic=-7.5701764...e-12) \"\"\" random_state = _random_state_init(random_state) # Compute U-centered", "Compute the test of independence based on the partial distance", "[1000, 1000, 0, 1000]]) >>> dcor.independence.partial_distance_covariance_test(a, a, b) ... #", "distance_correlation_t_statistic(x, y): \"\"\" Transformation of the bias corrected version of", "np.array([[1000, 0, 0, 1000], ... [0, 1000, 1000, 1000], ...", "random vectors are independent. Parameters ---------- x: array_like First random", "random variables while the rows are individual instances of the", "test. See Also -------- partial_distance_covariance Examples -------- >>> import numpy", "[0, 1, 1, 1], ... [1, 1, 1, 1], ...", "1000, 1000], ... [1000, 1000, 0, 1000]]) >>> dcor.independence.partial_distance_covariance_test(a, a,", "testing independence of several distributions. The functions in this module", "of fractional Brownian motion. num_resamples: int Number of permutations resamples", "c) ... # doctest: +ELLIPSIS HypothesisTest(p_value=1.0, statistic=2.2533380...e-30) >>> dcor.independence.partial_distance_covariance_test(a, b,", "dcor.independence.distance_correlation_t_statistic(b, b) inf \"\"\" bcdcor = u_distance_correlation_sqr(x, y) n =", "Results of the hypothesis test. See Also -------- partial_distance_covariance Examples", "[9, 10, 11, 12], ... [13, 14, 15, 16]]) >>>", "Student t distribution. The null hypothesis is that the two", "= _dcor_internals._distance_matrix_generic( x, centering=_dcor_internals.double_centered, exponent=exponent) u_y = _dcor_internals._distance_matrix_generic( y, centering=_dcor_internals.double_centered,", "HypothesisTest(p_value=0.5, statistic=11.7532305...) >>> dcor.independence.distance_covariance_test(a, b, ... num_resamples=5, random_state=13) HypothesisTest(p_value=0.3333333..., statistic=11.7532305...)", "of distance covariance independence. Compute the test of independence based", "version of distance correlation used in :func:`distance_correlation_t_test`. 
Parameters ---------- x:", "as np >>> import dcor >>> a = np.array([[1, 2,", "x, y, *, num_resamples=0, exponent=1, random_state=None, n_jobs=1, ): \"\"\" Test", "[13, 14, 15, 16]]) >>> b = np.array([[1, 0, 0,", "statistic_function(distance_matrix): return u_x.shape[0] * _dcor_internals.mean_product( distance_matrix, u_y) return _hypothesis._permutation_test_with_sym_matrix( u_x,", "variables while the rows are individual instances of the random", "x, y, z, *, num_resamples=0, exponent=1, random_state=None, n_jobs=1, ): \"\"\"", "num_resamples=0, exponent=1, random_state=None, n_jobs=1, ): \"\"\" Test of partial distance", "in the permutation test. random_state: {None, int, array_like, numpy.random.RandomState} Random", "hypothesis test. See Also -------- distance_covariance Examples -------- >>> import", "hypothesis is that the first two random vectors are independent", "test is a permutation test where the null hypothesis is", "y) n = x.shape[0] v = n * (n -", "Exponent of the Euclidean distance, in the range :math:`(0, 2)`.", "the null hypothesis is that the two random vectors are", "float Exponent of the Euclidean distance, in the range :math:`(0,", "# Compute projections proj = _dcor_internals.u_complementary_projection(u_z) p_xz = proj(u_x) p_yz", "while the rows are individual instances of the random vector.", "4], ... [5, 6, 7, 8], ... [9, 10, 11,", "Number of permutations resamples to take in the permutation test.", "b, ... num_resamples=5, random_state=0) HypothesisTest(p_value=0.5, statistic=11.7532305...) >>> dcor.independence.distance_covariance_test(a, b, ...", "16]]) >>> b = np.array([[1, 0, 0, 1], ... [0,", "null hypothesis is that the two random vectors are independent.", "= u_distance_correlation_sqr(x, y) n = x.shape[0] v = n *", "_dcor_internals._check_same_n_elements(x, y) random_state = _random_state_init(random_state) # Compute U-centered matrices u_x", "x.shape[0] v = n * (n - 3) / 2", "of distance correlation used in :func:`distance_correlation_t_test`. Parameters ---------- x: array_like", "distance_correlation_t_test(x, y): \"\"\" Test of independence for high dimension based", "1, 1, 1], ... [1, 1, 0, 1]]) >>> dcor.independence.distance_covariance_test(a,", "y, z, *, num_resamples=0, exponent=1, random_state=None, n_jobs=1, ): \"\"\" Test", "_random_state_init, _transform_to_2d def distance_covariance_test( x, y, *, num_resamples=0, exponent=1, random_state=None,", "exponent=exponent) # Use the dcov statistic def statistic_function(distance_matrix): return u_x.shape[0]", "test where the null hypothesis is that the first two", "dcor.independence.distance_covariance_test(a, b) ... # doctest: +ELLIPSIS HypothesisTest(p_value=1.0, statistic=11.75323056...) >>> dcor.independence.distance_covariance_test(b,", "1], ... [1, 1, 1, 1], ... [1, 1, 0,", "to take in the permutation test. random_state: {None, int, array_like,", "= _random_state_init(random_state) # Compute U-centered matrices u_x = _dcor_internals._u_distance_matrix(x, exponent=exponent)", "random_state = _random_state_init(random_state) # Compute U-centered matrices u_x = _dcor_internals._distance_matrix_generic(", "U-centered matrices u_x = _dcor_internals._distance_matrix_generic( x, centering=_dcor_internals.double_centered, exponent=exponent) u_y =", "b) inf \"\"\" bcdcor = u_distance_correlation_sqr(x, y) n = x.shape[0]", ">>> with np.errstate(divide='ignore'): ... dcor.independence.distance_correlation_t_test(b, b) ... # doctest: +ELLIPSIS", "HypothesisTest Results of the hypothesis test. 
See Also -------- partial_distance_covariance", "two random vectors are independent. \"\"\" import numpy as np", "dcor.independence.distance_correlation_t_test(a, a) ... # doctest: +ELLIPSIS HypothesisTest(p_value=0.0, statistic=inf) >>> dcor.independence.distance_correlation_t_test(a,", "a permutation test where the null hypothesis is that the", "[1, 1, 1, 1], ... [1, 1, 0, 1]]) >>>", "of partial distance covariance independence. Compute the test of independence", "np.sqrt(v - 1) * bcdcor / np.sqrt(1 - bcdcor**2) def", ">>> dcor.independence.distance_covariance_test(a, a, ... num_resamples=7, random_state=0) HypothesisTest(p_value=0.125, statistic=208.0) \"\"\" x", "the random vector. exponent: float Exponent of the Euclidean distance,", "random vectors are independent given the third one. Parameters ----------", "b, c) ... # doctest: +ELLIPSIS HypothesisTest(p_value=1.0, statistic=7.2690070...e-15) >>> dcor.independence.partial_distance_covariance_test(b,", "/ 2 df = v - 1 p_value = 1", "of the random vector. z: array_like Observed random vector. The", "from ._utils import _random_state_init, _transform_to_2d def distance_covariance_test( x, y, *,", "= v - 1 p_value = 1 - scipy.stats.t.cdf(t_test, df=df)", "distance_matrix, u_y) return _hypothesis._permutation_test_with_sym_matrix( u_x, statistic_function=statistic_function, num_resamples=num_resamples, random_state=random_state, n_jobs=n_jobs) def", "are individual instances of the random vector. Returns ------- HypothesisTest", "dimension based on convergence to a Student t distribution. The", "np.errstate(divide='ignore'): ... dcor.independence.distance_correlation_t_statistic(b, b) inf \"\"\" bcdcor = u_distance_correlation_sqr(x, y)", "import dcor >>> a = np.array([[1, 2, 3, 4], ...", ">>> with np.errstate(divide='ignore'): ... dcor.independence.distance_correlation_t_statistic(b, b) inf \"\"\" bcdcor =", "the samples generated from two random vectors are independent. \"\"\"", "statistic=7.2690070...e-15) >>> dcor.independence.partial_distance_covariance_test(a, b, c, ... num_resamples=5, random_state=13) HypothesisTest(p_value=0.1666666..., statistic=7.2690070...e-15)", "the range :math:`(0, 2)`. Equivalently, it is twice the Hurst", "* _dcor_internals.u_product( distance_matrix, p_yz) return _hypothesis._permutation_test_with_sym_matrix( p_xz, statistic_function=statistic_function, num_resamples=num_resamples, random_state=random_state,", "1]]) >>> with np.errstate(divide='ignore'): ... dcor.independence.distance_correlation_t_test(a, a) ... # doctest:", "of independence for high dimension based on convergence to a", "doctest: +ELLIPSIS HypothesisTest(p_value=0.0, statistic=inf) \"\"\" t_test = distance_correlation_t_statistic(x, y) n", "of the random vector. y: array_like Second random vector. The", "with np.errstate(divide='ignore'): ... dcor.independence.distance_correlation_t_test(a, a) ... # doctest: +ELLIPSIS HypothesisTest(p_value=0.0,", "_hypothesis from ._dcor import u_distance_correlation_sqr from ._utils import _random_state_init, _transform_to_2d", "dcor.independence.distance_correlation_t_statistic(a, a) inf >>> dcor.independence.distance_correlation_t_statistic(a, b) ... # doctest: +ELLIPSIS", "._dcor import u_distance_correlation_sqr from ._utils import _random_state_init, _transform_to_2d def distance_covariance_test(", "vector. Returns ------- numpy scalar T statistic. See Also --------", "------- HypothesisTest Results of the hypothesis test. 
See Also --------", "= _dcor_internals._u_distance_matrix(z, exponent=exponent) # Compute projections proj = _dcor_internals.u_complementary_projection(u_z) p_xz", "np.array([[1, 0, 0, 1], ... [0, 1, 1, 1], ...", "HypothesisTest Results of the hypothesis test. See Also -------- distance_covariance", "Use the dcov statistic def statistic_function(distance_matrix): return u_x.shape[0] * _dcor_internals.mean_product(", "statistic=7.2690070...e-15) >>> dcor.independence.partial_distance_covariance_test(a, c, b, ... num_resamples=7, random_state=0) HypothesisTest(p_value=1.0, statistic=-7.5701764...e-12)", "0, 1]]) >>> with np.errstate(divide='ignore'): ... dcor.independence.distance_correlation_t_statistic(a, a) inf >>>", "statistic=11.75323056...) >>> dcor.independence.distance_covariance_test(b, b) HypothesisTest(p_value=1.0, statistic=1.3604610...) >>> dcor.independence.distance_covariance_test(a, b, ...", "df = v - 1 p_value = 1 - scipy.stats.t.cdf(t_test,", "b) ... # doctest: +ELLIPSIS HypothesisTest(p_value=1.0, statistic=142.6664416...) >>> dcor.independence.partial_distance_covariance_test(a, b,", "... # doctest: +ELLIPSIS HypothesisTest(p_value=0.0, statistic=inf) \"\"\" t_test = distance_correlation_t_statistic(x,", "statistic_function=statistic_function, num_resamples=num_resamples, random_state=random_state, n_jobs=n_jobs) def distance_correlation_t_statistic(x, y): \"\"\" Transformation of", "test. See Also -------- distance_covariance Examples -------- >>> import numpy", "Transformation of the bias corrected version of distance correlation used", "= proj(u_x) p_yz = proj(u_y) # Use the pdcor statistic", "n_jobs=1, ): \"\"\" Test of partial distance covariance independence. Compute", "p_xz = proj(u_x) p_yz = proj(u_y) # Use the pdcor", "... dcor.independence.distance_correlation_t_test(b, b) ... # doctest: +ELLIPSIS HypothesisTest(p_value=0.0, statistic=inf) \"\"\"", "individual instances of the random vector. num_resamples: int Number of", "+ELLIPSIS -0.4430164... >>> with np.errstate(divide='ignore'): ... dcor.independence.distance_correlation_t_statistic(b, b) inf \"\"\"", "u_x.shape[0] * _dcor_internals.u_product( distance_matrix, p_yz) return _hypothesis._permutation_test_with_sym_matrix( p_xz, statistic_function=statistic_function, num_resamples=num_resamples,", "is that the two random vectors are independent. Parameters ----------", "0, 1000]]) >>> dcor.independence.partial_distance_covariance_test(a, a, b) ... # doctest: +ELLIPSIS", "random_state=None, n_jobs=1, ): \"\"\" Test of partial distance covariance independence.", "third one. Parameters ---------- x: array_like First random vector. The", "statistic=11.7532305...) >>> dcor.independence.distance_covariance_test(a, a, ... num_resamples=7, random_state=0) HypothesisTest(p_value=0.125, statistic=208.0) \"\"\"", "rows are individual instances of the random vector. y: array_like", "b, c, ... num_resamples=5, random_state=0) HypothesisTest(p_value=0.1666666..., statistic=7.2690070...e-15) >>> dcor.independence.partial_distance_covariance_test(a, b,", "HypothesisTest(p_value=1.0, statistic=142.6664416...) >>> dcor.independence.partial_distance_covariance_test(a, b, c) ... # doctest: +ELLIPSIS", "individual instances of the random vector. Returns ------- HypothesisTest Results", "): \"\"\" Test of distance covariance independence. Compute the test", "Compute the test of independence based on the distance covariance,", "... 
num_resamples=5, random_state=13) HypothesisTest(p_value=0.1666666..., statistic=7.2690070...e-15) >>> dcor.independence.partial_distance_covariance_test(a, c, b, ...", "= _dcor_internals.u_complementary_projection(u_z) p_xz = proj(u_x) p_yz = proj(u_y) # Use", "the rows are individual instances of the random vector. Returns", "np.sqrt(1 - bcdcor**2) def distance_correlation_t_test(x, y): \"\"\" Test of independence", "* bcdcor / np.sqrt(1 - bcdcor**2) def distance_correlation_t_test(x, y): \"\"\"", "statistic=-0.4430164...) >>> with np.errstate(divide='ignore'): ... dcor.independence.distance_correlation_t_test(b, b) ... # doctest:", "... [1, 1, 1, 1], ... [1, 1, 0, 1]])", "= x.shape[0] v = n * (n - 3) /", "dcor.independence.partial_distance_covariance_test(a, a, b) ... # doctest: +ELLIPSIS HypothesisTest(p_value=1.0, statistic=142.6664416...) >>>", "test where the null hypothesis is that the two random", "num_resamples=num_resamples, random_state=random_state, n_jobs=n_jobs) def partial_distance_covariance_test( x, y, z, *, num_resamples=0,", "u_y = _dcor_internals._u_distance_matrix(y, exponent=exponent) u_z = _dcor_internals._u_distance_matrix(z, exponent=exponent) # Compute", "= _transform_to_2d(y) _dcor_internals._check_same_n_elements(x, y) random_state = _random_state_init(random_state) # Compute U-centered", "based on the distance covariance, for two random vectors. The", "1000], ... [1000, 1000, 0, 1000]]) >>> dcor.independence.partial_distance_covariance_test(a, a, b)", "are individual instances of the random vector. y: array_like Second", "random_state=0) HypothesisTest(p_value=0.125, statistic=208.0) \"\"\" x = _transform_to_2d(x) y = _transform_to_2d(y)", "generate the permutations. Returns ------- HypothesisTest Results of the hypothesis", "instances of the random vector. Returns ------- numpy scalar T", "1]]) >>> with np.errstate(divide='ignore'): ... dcor.independence.distance_correlation_t_statistic(a, a) inf >>> dcor.independence.distance_correlation_t_statistic(a,", "# doctest: +ELLIPSIS -0.4430164... >>> with np.errstate(divide='ignore'): ... dcor.independence.distance_correlation_t_statistic(b, b)", "- 1) * bcdcor / np.sqrt(1 - bcdcor**2) def distance_correlation_t_test(x,", "Results of the hypothesis test. See Also -------- distance_covariance Examples", "independence based on the distance covariance, for two random vectors.", "instances of the random vector. y: array_like Second random vector.", "np.errstate(divide='ignore'): ... dcor.independence.distance_correlation_t_test(a, a) ... # doctest: +ELLIPSIS HypothesisTest(p_value=0.0, statistic=inf)", "= _transform_to_2d(x) y = _transform_to_2d(y) _dcor_internals._check_same_n_elements(x, y) random_state = _random_state_init(random_state)", "The test is a permutation test where the null hypothesis", "random_state=0) HypothesisTest(p_value=0.5, statistic=11.7532305...) >>> dcor.independence.distance_covariance_test(a, b, ... num_resamples=5, random_state=13) HypothesisTest(p_value=0.3333333...,", "# Compute U-centered matrices u_x = _dcor_internals._distance_matrix_generic( x, centering=_dcor_internals.double_centered, exponent=exponent)", "- bcdcor**2) def distance_correlation_t_test(x, y): \"\"\" Test of independence for", "independent given the third one. Parameters ---------- x: array_like First", "vector. num_resamples: int Number of permutations resamples to take in", "on convergence to a Student t distribution. The null hypothesis", "of the random vector. 
Returns ------- HypothesisTest Results of the", "twice the Hurst parameter of fractional Brownian motion. num_resamples: int", "np import scipy.stats from . import _dcor_internals, _hypothesis from ._dcor", "scalar T statistic. See Also -------- distance_correlation_t_test Examples -------- >>>", "provide methods for testing if the samples generated from two", "0, 1]]) >>> dcor.independence.distance_covariance_test(a, a) HypothesisTest(p_value=1.0, statistic=208.0) >>> dcor.independence.distance_covariance_test(a, b)", "... # doctest: +ELLIPSIS HypothesisTest(p_value=0.6327451..., statistic=-0.4430164...) >>> with np.errstate(divide='ignore'): ...", "HypothesisTest(p_value=0.6327451..., statistic=-0.4430164...) >>> with np.errstate(divide='ignore'): ... dcor.independence.distance_correlation_t_test(b, b) ... #", "... [9, 10, 11, 12], ... [13, 14, 15, 16]])", "HypothesisTest(p_value=1.0, statistic=-7.5701764...e-12) \"\"\" random_state = _random_state_init(random_state) # Compute U-centered matrices", "The null hypothesis is that the two random vectors are", "the permutation test. random_state: {None, int, array_like, numpy.random.RandomState} Random state", "u_z = _dcor_internals._u_distance_matrix(z, exponent=exponent) # Compute projections proj = _dcor_internals.u_complementary_projection(u_z)", "... # doctest: +ELLIPSIS HypothesisTest(p_value=1.0, statistic=2.2533380...e-30) >>> dcor.independence.partial_distance_covariance_test(a, b, c,", "Second random vector. The columns correspond with the individual random", "u_distance_correlation_sqr from ._utils import _random_state_init, _transform_to_2d def distance_covariance_test( x, y,", "num_resamples=5, random_state=0) HypothesisTest(p_value=0.5, statistic=11.7532305...) >>> dcor.independence.distance_covariance_test(a, b, ... num_resamples=5, random_state=13)", "distance_correlation_t_statistic Examples -------- >>> import numpy as np >>> import", "vector. z: array_like Observed random vector. The columns correspond with", "y) random_state = _random_state_init(random_state) # Compute U-centered matrices u_x =", "Hurst parameter of fractional Brownian motion. num_resamples: int Number of", "/ 2 return np.sqrt(v - 1) * bcdcor / np.sqrt(1", "... [1, 1, 0, 1]]) >>> dcor.independence.distance_covariance_test(a, a) HypothesisTest(p_value=1.0, statistic=208.0)", "Functions for testing independence of several distributions. The functions in", "samples generated from two random vectors are independent. \"\"\" import", "the distance covariance, for two random vectors. The test is", "1, 1, 1], ... [1, 1, 1, 1], ... [1,", "in :func:`distance_correlation_t_test`. Parameters ---------- x: array_like First random vector. The", "individual instances of the random vector. exponent: float Exponent of", "y, centering=_dcor_internals.double_centered, exponent=exponent) # Use the dcov statistic def statistic_function(distance_matrix):", "15, 16]]) >>> b = np.array([[1, 0, 0, 1], ...", "# doctest: +ELLIPSIS HypothesisTest(p_value=1.0, statistic=11.75323056...) >>> dcor.independence.distance_covariance_test(b, b) HypothesisTest(p_value=1.0, statistic=1.3604610...)", "statistic def statistic_function(distance_matrix): return u_x.shape[0] * _dcor_internals.mean_product( distance_matrix, u_y) return", "third. 
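
# Illustrative sketch (hypothetical helper, not part of the public API):
# conceptually, the permutation test above computes the observed statistic,
# recomputes it under random row permutations of one sample, and reports the
# fraction of values at least as large as the observed one. The library's
# _permutation_test_with_sym_matrix permutes a precomputed centered matrix
# instead, which avoids recomputing distances.
def _naive_permutation_pvalue(x, y, statistic, num_resamples, random_state):
    """Monte Carlo p-value: fraction of statistics >= the observed one."""
    rng = np.random.RandomState(random_state)
    observed = statistic(x, y)
    # The observed statistic counts as one resample, hence the +1 terms;
    # with num_resamples=7 and no permutation beating the observed value,
    # this gives 1/8, matching the p_value=0.125 doctest above.
    count = 1
    for _ in range(num_resamples):
        permuted = x[rng.permutation(x.shape[0])]
        if statistic(permuted, y) >= observed:
            count += 1
    return count / (num_resamples + 1)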

def partial_distance_covariance_test(
        x,
        y,
        z,
        *,
        num_resamples=0,
        exponent=1,
        random_state=None,
        n_jobs=1,
):
    """
    Test of partial distance covariance independence.

    Compute the test of independence based on the partial distance
    covariance, for two random vectors conditioned on a third.

    The test is a permutation test where the null hypothesis is that the
    first two random vectors are independent given the third one.

    Parameters
    ----------
    x: array_like
        First random vector. The columns correspond with the individual
        random variables while the rows are individual instances of the
        random vector.
    y: array_like
        Second random vector. The columns correspond with the individual
        random variables while the rows are individual instances of the
        random vector.
    z: array_like
        Observed random vector. The columns correspond with the individual
        random variables while the rows are individual instances of the
        random vector.
    num_resamples: int
        Number of permutation resamples to take in the permutation test.
    exponent: float
        Exponent of the Euclidean distance, in the range :math:`(0, 2)`.
        Equivalently, it is twice the Hurst parameter of fractional Brownian
        motion.
    random_state: {None, int, array_like, numpy.random.RandomState}
        Random state used to generate the permutations.

    Returns
    -------
    HypothesisTest
        Results of the hypothesis test.

    See Also
    --------
    partial_distance_covariance

    Examples
    --------
    >>> import numpy as np
    >>> import dcor
    >>> a = np.array([[1, 2, 3, 4],
    ...               [5, 6, 7, 8],
    ...               [9, 10, 11, 12],
    ...               [13, 14, 15, 16]])
    >>> b = np.array([[1, 0, 0, 1],
    ...               [0, 1, 1, 1],
    ...               [1, 1, 1, 1],
    ...               [1, 1, 0, 1]])
    >>> c = np.array([[1000, 0, 0, 1000],
    ...               [0, 1000, 1000, 1000],
    ...               [1000, 1000, 1000, 1000],
    ...               [1000, 1000, 0, 1000]])
    >>> dcor.independence.partial_distance_covariance_test(a, a, b)
    ...                                       # doctest: +ELLIPSIS
    HypothesisTest(p_value=1.0, statistic=142.6664416...)
    >>> dcor.independence.partial_distance_covariance_test(a, b, c)
    ...                                       # doctest: +ELLIPSIS
    HypothesisTest(p_value=1.0, statistic=7.2690070...e-15)
    >>> dcor.independence.partial_distance_covariance_test(b, b, c)
    ...                                       # doctest: +ELLIPSIS
    HypothesisTest(p_value=1.0, statistic=2.2533380...e-30)
    >>> dcor.independence.partial_distance_covariance_test(a, b, c,
    ...     num_resamples=5, random_state=0)
    HypothesisTest(p_value=0.1666666..., statistic=7.2690070...e-15)
    >>> dcor.independence.partial_distance_covariance_test(a, b, c,
    ...     num_resamples=5, random_state=13)
    HypothesisTest(p_value=0.1666666..., statistic=7.2690070...e-15)
    >>> dcor.independence.partial_distance_covariance_test(a, c, b,
    ...     num_resamples=7, random_state=0)
    HypothesisTest(p_value=1.0, statistic=-7.5701764...e-12)

    """
    random_state = _random_state_init(random_state)

    # Compute U-centered distance matrices
    u_x = _dcor_internals._u_distance_matrix(x, exponent=exponent)
    u_y = _dcor_internals._u_distance_matrix(y, exponent=exponent)
    u_z = _dcor_internals._u_distance_matrix(z, exponent=exponent)

    # Compute projections onto the complement of u_z
    proj = _dcor_internals.u_complementary_projection(u_z)

    p_xz = proj(u_x)
    p_yz = proj(u_y)

    # Use the pdcor statistic
    def statistic_function(distance_matrix):
        return u_x.shape[0] * _dcor_internals.u_product(
            distance_matrix, p_yz)

    return _hypothesis._permutation_test_with_sym_matrix(
        p_xz,
        statistic_function=statistic_function,
        num_resamples=num_resamples,
        random_state=random_state,
        n_jobs=n_jobs)
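
# Illustrative sketch (hypothetical helper): the complementary projection
# used above removes, in the Frobenius inner-product sense, the component
# of a centered distance matrix that lies along u_z, leaving the part of
# x (or y) not explained by z. This minimal version ignores the library's
# exact normalization and edge-case handling.
def _complementary_projection_sketch(u_z):
    """Return a map sending a matrix to its component orthogonal to u_z."""
    denom = np.sum(u_z * u_z)

    def proj(a):
        if denom == 0:
            # Nothing to remove when u_z vanishes
            return a
        return a - (np.sum(a * u_z) / denom) * u_z

    return proj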
import _dcor_internals,", "high dimension based on convergence to a Student t distribution.", "random vectors. The test is a permutation test where the", "v - 1 p_value = 1 - scipy.stats.t.cdf(t_test, df=df) return", "U-centered matrices u_x = _dcor_internals._u_distance_matrix(x, exponent=exponent) u_y = _dcor_internals._u_distance_matrix(y, exponent=exponent)", "doctest: +ELLIPSIS HypothesisTest(p_value=1.0, statistic=7.2690070...e-15) >>> dcor.independence.partial_distance_covariance_test(b, b, c) ... #", "vectors conditioned on a third. The test is a permutation", "np.errstate(divide='ignore'): ... dcor.independence.distance_correlation_t_statistic(a, a) inf >>> dcor.independence.distance_correlation_t_statistic(a, b) ... #", "bcdcor = u_distance_correlation_sqr(x, y) n = x.shape[0] v = n", "HypothesisTest(p_value=0.125, statistic=208.0) \"\"\" x = _transform_to_2d(x) y = _transform_to_2d(y) _dcor_internals._check_same_n_elements(x,", "= np.array([[1000, 0, 0, 1000], ... [0, 1000, 1000, 1000],", "vectors are independent given the third one. Parameters ---------- x:", "the hypothesis test. See Also -------- partial_distance_covariance Examples -------- >>>", "bcdcor**2) def distance_correlation_t_test(x, y): \"\"\" Test of independence for high", "Returns ------- numpy scalar T statistic. See Also -------- distance_correlation_t_test", "permutation test. random_state: {None, int, array_like, numpy.random.RandomState} Random state to", "= np.array([[1, 2, 3, 4], ... [5, 6, 7, 8],", "c, b, ... num_resamples=7, random_state=0) HypothesisTest(p_value=1.0, statistic=-7.5701764...e-12) \"\"\" random_state =", ">>> dcor.independence.distance_correlation_t_statistic(a, b) ... # doctest: +ELLIPSIS -0.4430164... >>> with", "from two random vectors are independent. \"\"\" import numpy as", "statistic def statistic_function(distance_matrix): return u_x.shape[0] * _dcor_internals.u_product( distance_matrix, p_yz) return", "... num_resamples=5, random_state=0) HypothesisTest(p_value=0.5, statistic=11.7532305...) >>> dcor.independence.distance_covariance_test(a, b, ... num_resamples=5,", "-------- partial_distance_covariance Examples -------- >>> import numpy as np >>>", "1000]]) >>> dcor.independence.partial_distance_covariance_test(a, a, b) ... # doctest: +ELLIPSIS HypothesisTest(p_value=1.0,", ":func:`distance_correlation_t_test`. Parameters ---------- x: array_like First random vector. The columns", "individual instances of the random vector. y: array_like Second random", "covariance, for two random vectors conditioned on a third. The", "p_yz) return _hypothesis._permutation_test_with_sym_matrix( p_xz, statistic_function=statistic_function, num_resamples=num_resamples, random_state=random_state, n_jobs=n_jobs) def distance_correlation_t_statistic(x,", "two random vectors conditioned on a third. The test is", ">>> dcor.independence.distance_covariance_test(a, a) HypothesisTest(p_value=1.0, statistic=208.0) >>> dcor.independence.distance_covariance_test(a, b) ... #", "12], ... [13, 14, 15, 16]]) >>> b = np.array([[1,", "_transform_to_2d def distance_covariance_test( x, y, *, num_resamples=0, exponent=1, random_state=None, n_jobs=1,", "Parameters ---------- x: array_like First random vector. 
The columns correspond", "_hypothesis._permutation_test_with_sym_matrix( u_x, statistic_function=statistic_function, num_resamples=num_resamples, random_state=random_state, n_jobs=n_jobs) def partial_distance_covariance_test( x, y,", "u_distance_correlation_sqr(x, y) n = x.shape[0] v = n * (n", "): \"\"\" Test of partial distance covariance independence. Compute the", "statistic. See Also -------- distance_correlation_t_test Examples -------- >>> import numpy", "two random vectors are independent given the third one. Parameters", "is that the first two random vectors are independent given", "b) ... # doctest: +ELLIPSIS HypothesisTest(p_value=1.0, statistic=11.75323056...) >>> dcor.independence.distance_covariance_test(b, b)", "c, ... num_resamples=5, random_state=0) HypothesisTest(p_value=0.1666666..., statistic=7.2690070...e-15) >>> dcor.independence.partial_distance_covariance_test(a, b, c,", "dcor.independence.distance_correlation_t_test(b, b) ... # doctest: +ELLIPSIS HypothesisTest(p_value=0.0, statistic=inf) \"\"\" t_test", "v = n * (n - 3) / 2 df", "... [0, 1000, 1000, 1000], ... [1000, 1000, 1000, 1000],", "permutations. Returns ------- HypothesisTest Results of the hypothesis test. See", ">>> dcor.independence.partial_distance_covariance_test(b, b, c) ... # doctest: +ELLIPSIS HypothesisTest(p_value=1.0, statistic=2.2533380...e-30)", "... dcor.independence.distance_correlation_t_statistic(b, b) inf \"\"\" bcdcor = u_distance_correlation_sqr(x, y) n", "of permutations resamples to take in the permutation test. random_state:", "1], ... [1, 1, 0, 1]]) >>> c = np.array([[1000,", "[1, 1, 0, 1]]) >>> dcor.independence.distance_covariance_test(a, a) HypothesisTest(p_value=1.0, statistic=208.0) >>>", "the rows are individual instances of the random vector. num_resamples:", "1000, 1000], ... [1000, 1000, 1000, 1000], ... [1000, 1000,", "a third. The test is a permutation test where the", "array_like First random vector. The columns correspond with the individual", "convergence to a Student t distribution. The null hypothesis is", "random_state: {None, int, array_like, numpy.random.RandomState} Random state to generate the", "_hypothesis._permutation_test_with_sym_matrix( p_xz, statistic_function=statistic_function, num_resamples=num_resamples, random_state=random_state, n_jobs=n_jobs) def distance_correlation_t_statistic(x, y): \"\"\"", "HypothesisTest(p_value=1.0, statistic=7.2690070...e-15) >>> dcor.independence.partial_distance_covariance_test(b, b, c) ... # doctest: +ELLIPSIS", "7, 8], ... [9, 10, 11, 12], ... [13, 14,", "1], ... [1, 1, 0, 1]]) >>> dcor.independence.distance_covariance_test(a, a) HypothesisTest(p_value=1.0,", "the test of independence based on the partial distance covariance,", "independence. Compute the test of independence based on the distance", "0, 0, 1000], ... [0, 1000, 1000, 1000], ... [1000,", "from ._dcor import u_distance_correlation_sqr from ._utils import _random_state_init, _transform_to_2d def", "[0, 1000, 1000, 1000], ... [1000, 1000, 1000, 1000], ...", "HypothesisTest(p_value=0.0, statistic=inf) \"\"\" t_test = distance_correlation_t_statistic(x, y) n = x.shape[0]", "random vector. Returns ------- HypothesisTest Results of the hypothesis test.", "the random vector. num_resamples: int Number of permutations resamples to", "the bias corrected version of distance correlation used in :func:`distance_correlation_t_test`.", "with np.errstate(divide='ignore'): ... 
dcor.independence.distance_correlation_t_statistic(a, a) inf >>> dcor.independence.distance_correlation_t_statistic(a, b) ...", "[1, 1, 0, 1]]) >>> c = np.array([[1000, 0, 0,", "1000, 1000, 1000], ... [1000, 1000, 1000, 1000], ... [1000,", "are individual instances of the random vector. num_resamples: int Number", "random_state=0) HypothesisTest(p_value=0.1666666..., statistic=7.2690070...e-15) >>> dcor.independence.partial_distance_covariance_test(a, b, c, ... num_resamples=5, random_state=13)", "a) HypothesisTest(p_value=1.0, statistic=208.0) >>> dcor.independence.distance_covariance_test(a, b) ... # doctest: +ELLIPSIS", "dcor.independence.distance_covariance_test(a, a, ... num_resamples=7, random_state=0) HypothesisTest(p_value=0.125, statistic=208.0) \"\"\" x =", "instances of the random vector. exponent: float Exponent of the", ">>> b = np.array([[1, 0, 0, 1], ... [0, 1,", "b, c) ... # doctest: +ELLIPSIS HypothesisTest(p_value=1.0, statistic=2.2533380...e-30) >>> dcor.independence.partial_distance_covariance_test(a,", "with the individual random variables while the rows are individual", "the test of independence based on the distance covariance, for", "for testing independence of several distributions. The functions in this", "= _random_state_init(random_state) # Compute U-centered matrices u_x = _dcor_internals._distance_matrix_generic( x,", "return np.sqrt(v - 1) * bcdcor / np.sqrt(1 - bcdcor**2)", "y: array_like Second random vector. The columns correspond with the", "distance_correlation_t_test Examples -------- >>> import numpy as np >>> import", "if the samples generated from two random vectors are independent.", "= n * (n - 3) / 2 df =", "HypothesisTest(p_value=1.0, statistic=208.0) >>> dcor.independence.distance_covariance_test(a, b) ... # doctest: +ELLIPSIS HypothesisTest(p_value=1.0,", "y): \"\"\" Transformation of the bias corrected version of distance", "exponent=exponent) u_y = _dcor_internals._u_distance_matrix(y, exponent=exponent) u_z = _dcor_internals._u_distance_matrix(z, exponent=exponent) #", "+ELLIPSIS HypothesisTest(p_value=0.6327451..., statistic=-0.4430164...) >>> with np.errstate(divide='ignore'): ... dcor.independence.distance_correlation_t_test(b, b) ...", "Also -------- distance_covariance Examples -------- >>> import numpy as np", "14, 15, 16]]) >>> b = np.array([[1, 0, 0, 1],", "# doctest: +ELLIPSIS HypothesisTest(p_value=0.6327451..., statistic=-0.4430164...) >>> with np.errstate(divide='ignore'): ... dcor.independence.distance_correlation_t_test(b,", "distance covariance, for two random vectors conditioned on a third.", "projections proj = _dcor_internals.u_complementary_projection(u_z) p_xz = proj(u_x) p_yz = proj(u_y)", "HypothesisTest(p_value=1.0, statistic=1.3604610...) >>> dcor.independence.distance_covariance_test(a, b, ... num_resamples=5, random_state=0) HypothesisTest(p_value=0.5, statistic=11.7532305...)", "u_x = _dcor_internals._distance_matrix_generic( x, centering=_dcor_internals.double_centered, exponent=exponent) u_y = _dcor_internals._distance_matrix_generic( y,", "n_jobs=n_jobs) def partial_distance_covariance_test( x, y, z, *, num_resamples=0, exponent=1, random_state=None,", "rows are individual instances of the random vector. num_resamples: int", "on the distance covariance, for two random vectors. The test", "the hypothesis test. See Also -------- distance_covariance Examples -------- >>>", "... 
num_resamples=7, random_state=0) HypothesisTest(p_value=0.125, statistic=208.0) \"\"\" x = _transform_to_2d(x) y", "return u_x.shape[0] * _dcor_internals.mean_product( distance_matrix, u_y) return _hypothesis._permutation_test_with_sym_matrix( u_x, statistic_function=statistic_function,", "[1, 1, 0, 1]]) >>> with np.errstate(divide='ignore'): ... dcor.independence.distance_correlation_t_statistic(a, a)", "0, 1000], ... [0, 1000, 1000, 1000], ... [1000, 1000,", "distributions. The functions in this module provide methods for testing", ">>> dcor.independence.partial_distance_covariance_test(a, b, c, ... num_resamples=5, random_state=13) HypothesisTest(p_value=0.1666666..., statistic=7.2690070...e-15) >>>", "independent. Parameters ---------- x: array_like First random vector. The columns", "b, ... num_resamples=7, random_state=0) HypothesisTest(p_value=1.0, statistic=-7.5701764...e-12) \"\"\" random_state = _random_state_init(random_state)", "where the null hypothesis is that the two random vectors", "rows are individual instances of the random vector. Returns -------", "\"\"\" t_test = distance_correlation_t_statistic(x, y) n = x.shape[0] v =", "two random vectors. The test is a permutation test where", "exponent=exponent) # Compute projections proj = _dcor_internals.u_complementary_projection(u_z) p_xz = proj(u_x)", "random vector. exponent: float Exponent of the Euclidean distance, in", ">>> dcor.independence.distance_correlation_t_test(a, b) ... # doctest: +ELLIPSIS HypothesisTest(p_value=0.6327451..., statistic=-0.4430164...) >>>", "where the null hypothesis is that the first two random", "... [1, 1, 0, 1]]) >>> with np.errstate(divide='ignore'): ... dcor.independence.distance_correlation_t_statistic(a,", "the random vector. Returns ------- numpy scalar T statistic. See", "exponent: float Exponent of the Euclidean distance, in the range", "... [1000, 1000, 0, 1000]]) >>> dcor.independence.partial_distance_covariance_test(a, a, b) ...", "\"\"\" Test of independence for high dimension based on convergence", "hypothesis test. See Also -------- distance_correlation_t_statistic Examples -------- >>> import", "1, 1, 1], ... [1, 1, 0, 1]]) >>> c", "pdcor statistic def statistic_function(distance_matrix): return u_x.shape[0] * _dcor_internals.u_product( distance_matrix, p_yz)", "1) * bcdcor / np.sqrt(1 - bcdcor**2) def distance_correlation_t_test(x, y):", "------- numpy scalar T statistic. See Also -------- distance_correlation_t_test Examples", "b, c, ... num_resamples=5, random_state=13) HypothesisTest(p_value=0.1666666..., statistic=7.2690070...e-15) >>> dcor.independence.partial_distance_covariance_test(a, c,", "numpy.random.RandomState} Random state to generate the permutations. Returns ------- HypothesisTest", ">>> dcor.independence.partial_distance_covariance_test(a, c, b, ... num_resamples=7, random_state=0) HypothesisTest(p_value=1.0, statistic=-7.5701764...e-12) \"\"\"", "def distance_correlation_t_statistic(x, y): \"\"\" Transformation of the bias corrected version", "Random state to generate the permutations. Returns ------- HypothesisTest Results", "random_state=random_state, n_jobs=n_jobs) def partial_distance_covariance_test( x, y, z, *, num_resamples=0, exponent=1,", "dcov statistic def statistic_function(distance_matrix): return u_x.shape[0] * _dcor_internals.mean_product( distance_matrix, u_y)", "a) inf >>> dcor.independence.distance_correlation_t_statistic(a, b) ... # doctest: +ELLIPSIS -0.4430164...", "on a third. 
The test is a permutation test where", "used in :func:`distance_correlation_t_test`. Parameters ---------- x: array_like First random vector.", "1, 0, 1]]) >>> with np.errstate(divide='ignore'): ... dcor.independence.distance_correlation_t_statistic(a, a) inf", "def distance_correlation_t_test(x, y): \"\"\" Test of independence for high dimension", "dcor.independence.partial_distance_covariance_test(a, c, b, ... num_resamples=7, random_state=0) HypothesisTest(p_value=1.0, statistic=-7.5701764...e-12) \"\"\" random_state", "dcor.independence.partial_distance_covariance_test(a, b, c, ... num_resamples=5, random_state=0) HypothesisTest(p_value=0.1666666..., statistic=7.2690070...e-15) >>> dcor.independence.partial_distance_covariance_test(a,", "the rows are individual instances of the random vector. y:", "test. random_state: {None, int, array_like, numpy.random.RandomState} Random state to generate", "[5, 6, 7, 8], ... [9, 10, 11, 12], ...", "statistic=inf) >>> dcor.independence.distance_correlation_t_test(a, b) ... # doctest: +ELLIPSIS HypothesisTest(p_value=0.6327451..., statistic=-0.4430164...)", "hypothesis test. See Also -------- partial_distance_covariance Examples -------- >>> import", "dcor.independence.distance_correlation_t_test(a, b) ... # doctest: +ELLIPSIS HypothesisTest(p_value=0.6327451..., statistic=-0.4430164...) >>> with", "u_x, statistic_function=statistic_function, num_resamples=num_resamples, random_state=random_state, n_jobs=n_jobs) def partial_distance_covariance_test( x, y, z,", "b = np.array([[1, 0, 0, 1], ... [0, 1, 1,", "# doctest: +ELLIPSIS HypothesisTest(p_value=0.0, statistic=inf) \"\"\" t_test = distance_correlation_t_statistic(x, y)", "vector. Returns ------- HypothesisTest Results of the hypothesis test. See", "import u_distance_correlation_sqr from ._utils import _random_state_init, _transform_to_2d def distance_covariance_test( x,", "vectors. The test is a permutation test where the null", "... num_resamples=5, random_state=13) HypothesisTest(p_value=0.3333333..., statistic=11.7532305...) >>> dcor.independence.distance_covariance_test(a, a, ... num_resamples=7,", ">>> dcor.independence.distance_covariance_test(a, b, ... num_resamples=5, random_state=13) HypothesisTest(p_value=0.3333333..., statistic=11.7532305...) >>> dcor.independence.distance_covariance_test(a,", "the rows are individual instances of the random vector. z:", "_dcor_internals._u_distance_matrix(z, exponent=exponent) # Compute projections proj = _dcor_internals.u_complementary_projection(u_z) p_xz =", "statistic=208.0) >>> dcor.independence.distance_covariance_test(a, b) ... # doctest: +ELLIPSIS HypothesisTest(p_value=1.0, statistic=11.75323056...)", "of the hypothesis test. See Also -------- distance_correlation_t_statistic Examples --------", "\"\"\" Functions for testing independence of several distributions. The functions", "1000, 0, 1000]]) >>> dcor.independence.partial_distance_covariance_test(a, a, b) ... # doctest:", "random vector. num_resamples: int Number of permutations resamples to take", "correspond with the individual random variables while the rows are", "two random vectors are independent. Parameters ---------- x: array_like First", "vector. exponent: float Exponent of the Euclidean distance, in the", "parameter of fractional Brownian motion. num_resamples: int Number of permutations", "np >>> import dcor >>> a = np.array([[1, 2, 3,", "permutation test where the null hypothesis is that the two", "random vector. Returns ------- numpy scalar T statistic. 
See Also", "statistic=1.3604610...) >>> dcor.independence.distance_covariance_test(a, b, ... num_resamples=5, random_state=0) HypothesisTest(p_value=0.5, statistic=11.7532305...) >>>", "{None, int, array_like, numpy.random.RandomState} Random state to generate the permutations.", "# doctest: +ELLIPSIS HypothesisTest(p_value=1.0, statistic=2.2533380...e-30) >>> dcor.independence.partial_distance_covariance_test(a, b, c, ...", "the third one. Parameters ---------- x: array_like First random vector.", "that the two random vectors are independent. Parameters ---------- x:", "statistic=-7.5701764...e-12) \"\"\" random_state = _random_state_init(random_state) # Compute U-centered matrices u_x", "8], ... [9, 10, 11, 12], ... [13, 14, 15,", "- 3) / 2 return np.sqrt(v - 1) * bcdcor", "in this module provide methods for testing if the samples", "Test of partial distance covariance independence. Compute the test of", "permutations resamples to take in the permutation test. random_state: {None,", "dcor.independence.distance_covariance_test(b, b) HypothesisTest(p_value=1.0, statistic=1.3604610...) >>> dcor.independence.distance_covariance_test(a, b, ... num_resamples=5, random_state=0)", "u_x = _dcor_internals._u_distance_matrix(x, exponent=exponent) u_y = _dcor_internals._u_distance_matrix(y, exponent=exponent) u_z =", "Returns ------- HypothesisTest Results of the hypothesis test. See Also", "with np.errstate(divide='ignore'): ... dcor.independence.distance_correlation_t_test(b, b) ... # doctest: +ELLIPSIS HypothesisTest(p_value=0.0,", "the individual random variables while the rows are individual instances", "array_like, numpy.random.RandomState} Random state to generate the permutations. Returns -------", "Compute U-centered matrices u_x = _dcor_internals._u_distance_matrix(x, exponent=exponent) u_y = _dcor_internals._u_distance_matrix(y,", "that the first two random vectors are independent given the", "Euclidean distance, in the range :math:`(0, 2)`. Equivalently, it is", "the rows are individual instances of the random vector. exponent:", "are independent. \"\"\" import numpy as np import scipy.stats from", "c, ... num_resamples=5, random_state=13) HypothesisTest(p_value=0.1666666..., statistic=7.2690070...e-15) >>> dcor.independence.partial_distance_covariance_test(a, c, b,", "the dcov statistic def statistic_function(distance_matrix): return u_x.shape[0] * _dcor_internals.mean_product( distance_matrix,", "numpy as np >>> import dcor >>> a = np.array([[1,", "distance correlation used in :func:`distance_correlation_t_test`. Parameters ---------- x: array_like First", "b, ... num_resamples=5, random_state=13) HypothesisTest(p_value=0.3333333..., statistic=11.7532305...) >>> dcor.independence.distance_covariance_test(a, a, ...", "random vectors conditioned on a third. The test is a", "of independence based on the partial distance covariance, for two", "+ELLIPSIS HypothesisTest(p_value=1.0, statistic=142.6664416...) >>> dcor.independence.partial_distance_covariance_test(a, b, c) ... # doctest:", "matrices u_x = _dcor_internals._u_distance_matrix(x, exponent=exponent) u_y = _dcor_internals._u_distance_matrix(y, exponent=exponent) u_z", "num_resamples: int Number of permutations resamples to take in the", "num_resamples=5, random_state=0) HypothesisTest(p_value=0.1666666..., statistic=7.2690070...e-15) >>> dcor.independence.partial_distance_covariance_test(a, b, c, ... num_resamples=5,", "+ELLIPSIS HypothesisTest(p_value=1.0, statistic=11.75323056...) 
>>> dcor.independence.distance_covariance_test(b, b) HypothesisTest(p_value=1.0, statistic=1.3604610...) >>> dcor.independence.distance_covariance_test(a,", "bias corrected version of distance correlation used in :func:`distance_correlation_t_test`. Parameters", "doctest: +ELLIPSIS HypothesisTest(p_value=1.0, statistic=11.75323056...) >>> dcor.independence.distance_covariance_test(b, b) HypothesisTest(p_value=1.0, statistic=1.3604610...) >>>", "first two random vectors are independent given the third one.", "u_x.shape[0] * _dcor_internals.mean_product( distance_matrix, u_y) return _hypothesis._permutation_test_with_sym_matrix( u_x, statistic_function=statistic_function, num_resamples=num_resamples,", "... [5, 6, 7, 8], ... [9, 10, 11, 12],", "... # doctest: +ELLIPSIS HypothesisTest(p_value=1.0, statistic=11.75323056...) >>> dcor.independence.distance_covariance_test(b, b) HypothesisTest(p_value=1.0,", "based on the partial distance covariance, for two random vectors", "distance_correlation_t_statistic(x, y) n = x.shape[0] v = n * (n", "take in the permutation test. random_state: {None, int, array_like, numpy.random.RandomState}", "to a Student t distribution. The null hypothesis is that", "random_state=random_state, n_jobs=n_jobs) def distance_correlation_t_statistic(x, y): \"\"\" Transformation of the bias", "t_test = distance_correlation_t_statistic(x, y) n = x.shape[0] v = n", "n * (n - 3) / 2 df = v", "import _dcor_internals, _hypothesis from ._dcor import u_distance_correlation_sqr from ._utils import", "distance_covariance Examples -------- >>> import numpy as np >>> import", "0, 0, 1], ... [0, 1, 1, 1], ... [1,", "x: array_like First random vector. The columns correspond with the", "num_resamples=5, random_state=13) HypothesisTest(p_value=0.1666666..., statistic=7.2690070...e-15) >>> dcor.independence.partial_distance_covariance_test(a, c, b, ... num_resamples=7,", "vectors are independent. Parameters ---------- x: array_like First random vector.", "vectors are independent. \"\"\" import numpy as np import scipy.stats", "= n * (n - 3) / 2 return np.sqrt(v", "n_jobs=1, ): \"\"\" Test of distance covariance independence. Compute the", "it is twice the Hurst parameter of fractional Brownian motion.", "permutation test where the null hypothesis is that the first", "Also -------- partial_distance_covariance Examples -------- >>> import numpy as np", "* (n - 3) / 2 return np.sqrt(v - 1)", "= _dcor_internals._u_distance_matrix(x, exponent=exponent) u_y = _dcor_internals._u_distance_matrix(y, exponent=exponent) u_z = _dcor_internals._u_distance_matrix(z,", "functions in this module provide methods for testing if the", "methods for testing if the samples generated from two random", "test. See Also -------- distance_correlation_t_statistic Examples -------- >>> import numpy", "[1, 1, 0, 1]]) >>> with np.errstate(divide='ignore'): ... dcor.independence.distance_correlation_t_test(a, a)", "+ELLIPSIS HypothesisTest(p_value=0.0, statistic=inf) \"\"\" t_test = distance_correlation_t_statistic(x, y) n =", "1]]) >>> c = np.array([[1000, 0, 0, 1000], ... [0,", "partial_distance_covariance_test( x, y, z, *, num_resamples=0, exponent=1, random_state=None, n_jobs=1, ):", "+ELLIPSIS HypothesisTest(p_value=1.0, statistic=2.2533380...e-30) >>> dcor.independence.partial_distance_covariance_test(a, b, c, ... num_resamples=5, random_state=0)", "statistic=11.7532305...) >>> dcor.independence.distance_covariance_test(a, b, ... 
num_resamples=5, random_state=13) HypothesisTest(p_value=0.3333333..., statistic=11.7532305...) >>>", "-------- >>> import numpy as np >>> import dcor >>>", "\"\"\" bcdcor = u_distance_correlation_sqr(x, y) n = x.shape[0] v =", "First random vector. The columns correspond with the individual random", "instances of the random vector. z: array_like Observed random vector.", "the first two random vectors are independent given the third", "b) ... # doctest: +ELLIPSIS -0.4430164... >>> with np.errstate(divide='ignore'): ...", "b) ... # doctest: +ELLIPSIS HypothesisTest(p_value=0.0, statistic=inf) \"\"\" t_test =", "import _random_state_init, _transform_to_2d def distance_covariance_test( x, y, *, num_resamples=0, exponent=1,", "... # doctest: +ELLIPSIS HypothesisTest(p_value=1.0, statistic=142.6664416...) >>> dcor.independence.partial_distance_covariance_test(a, b, c)", ">>> a = np.array([[1, 2, 3, 4], ... [5, 6,", "dcor.independence.partial_distance_covariance_test(b, b, c) ... # doctest: +ELLIPSIS HypothesisTest(p_value=1.0, statistic=2.2533380...e-30) >>>", "random_state=None, n_jobs=1, ): \"\"\" Test of distance covariance independence. Compute", ">>> dcor.independence.partial_distance_covariance_test(a, a, b) ... # doctest: +ELLIPSIS HypothesisTest(p_value=1.0, statistic=142.6664416...)", ":math:`(0, 2)`. Equivalently, it is twice the Hurst parameter of", "c = np.array([[1000, 0, 0, 1000], ... [0, 1000, 1000,", "null hypothesis is that the first two random vectors are", "# Compute U-centered matrices u_x = _dcor_internals._u_distance_matrix(x, exponent=exponent) u_y =", "rows are individual instances of the random vector. z: array_like", "- 3) / 2 df = v - 1 p_value", "from . import _dcor_internals, _hypothesis from ._dcor import u_distance_correlation_sqr from", "random_state=13) HypothesisTest(p_value=0.3333333..., statistic=11.7532305...) >>> dcor.independence.distance_covariance_test(a, a, ... num_resamples=7, random_state=0) HypothesisTest(p_value=0.125,", "dcor.independence.partial_distance_covariance_test(a, b, c, ... num_resamples=5, random_state=13) HypothesisTest(p_value=0.1666666..., statistic=7.2690070...e-15) >>> dcor.independence.partial_distance_covariance_test(a,", "# doctest: +ELLIPSIS HypothesisTest(p_value=0.0, statistic=inf) >>> dcor.independence.distance_correlation_t_test(a, b) ... #", "HypothesisTest(p_value=1.0, statistic=11.75323056...) >>> dcor.independence.distance_covariance_test(b, b) HypothesisTest(p_value=1.0, statistic=1.3604610...) >>> dcor.independence.distance_covariance_test(a, b,", "num_resamples=7, random_state=0) HypothesisTest(p_value=0.125, statistic=208.0) \"\"\" x = _transform_to_2d(x) y =", "array_like Second random vector. The columns correspond with the individual", "random vectors are independent. \"\"\" import numpy as np import", "vector. The columns correspond with the individual random variables while", "the Hurst parameter of fractional Brownian motion. num_resamples: int Number" ]
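The shingles above come from the dcor.independence module (permutation and Student-t tests of independence built on distance covariance and bias-corrected distance correlation). For readers who want the behaviour rather than the n-grams, here is a minimal usage sketch assembled from the doctest fragments visible in the shingles; the input arrays and parameters are copied from those fragments (one partially visible row of b is filled in), and it assumes the dcor package is installed.

# Minimal usage sketch for the dcor.independence tests shingled above.
# Inputs and parameters mirror the doctest fragments; requires `dcor`.
import numpy as np
import dcor

a = np.array([[1, 2, 3, 4],
              [5, 6, 7, 8],
              [9, 10, 11, 12],
              [13, 14, 15, 16]])
b = np.array([[1, 0, 0, 1],
              [0, 1, 1, 1],
              [1, 1, 1, 1],
              [1, 1, 0, 1]])

# Permutation test based on distance covariance.
result = dcor.independence.distance_covariance_test(
    a, b, num_resamples=5, random_state=0)
print(result.p_value, result.statistic)

# High-dimensional test: bias-corrected distance correlation mapped
# to a Student t statistic.
t_result = dcor.independence.distance_correlation_t_test(a, b)
print(t_result.p_value, t_result.statistic)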
[ "colormap = None if name in self.colormaps: colormap = self.colormaps[name]", "# check for duplicates self.images[name] = image.image(name) return self.images[name] def", "yield i def set_dims(self, w, h): \"\"\" Set the dimensions", "check for duplicates self.variables[name] = {'type':type, 'min':min, 'max':max} def add_image(self,", "duplicates self.parameterlist.append([name, type]) def add_variable(self, name, type, min, max): \"\"\"", "renderer from . import convert class cis: \"\"\"Composible Image Set", "set of colormaps. \"\"\" #if colormap not in dict if", "print(\"\\n\") def get_image(self, key): \"\"\" Returns an image given its", "variable. \"\"\" variable = None if name in self.variables: variable", "return self.images[name] def get_variables(self): \"\"\" Return all variables. \"\"\" for", "= False if key in self.images: result = self.images[key] return", "list of parameters for the CIS. \"\"\" # check for", "in self.images: result = self.images[key] return result def get_images(self): \"\"\"", "CIS given a width and height. \"\"\" self.dims = [w,", "Debug print statement for CIS properties. \"\"\" print(\"printing cis\") print(\"", "CIS. \"\"\" # check for duplicates self.images[name] = image.image(name) return", "set of images in the CIS. \"\"\" # check for", "Add a colormap to the set of colormaps. \"\"\" #if", "def set_dims(self, w, h): \"\"\" Set the dimensions of the", "self.parametertable = None self.variables = {} self.images = {} self.colormaps", "for m in self.colormaps: print(m) for i in self.get_images(): print(\"", "return variable def get_image(self,name): \"\"\" Return an image. \"\"\" image", "{'type':type, 'min':min, 'max':max} def add_image(self, name): \"\"\" Add an image", "= {} self.colormaps = {} def debug_print(self): \"\"\" Debug print", "dimensions of the CIS given a width and height. \"\"\"", "remove_colormap(self, name): \"\"\" Remove a colormap from the set of", "if name in self.colormaps: colormap = self.colormaps[name] return colormap def", "image. \"\"\" image = None if name in self.images: image", "def remove_colormap(self, name): \"\"\" Remove a colormap from the set", "dims: {}\".format(self.dims)) print(\" flags: {}\".format(self.flags)) print(\" version: {}\".format(self.version)) print(\" colormaps:", "\"\"\" colormap = None if name in self.colormaps: colormap =", "def set_parameter_table(self, table): \"\"\" Set parameter table using a deep", "\"\"\" Return all colormaps. \"\"\" for i in self.colormaps: yield", "constructor. \"\"\" self.fname = filename self.classname = \"COMPOSABLE_IMAGE_SET\" self.dims =", "print(m) for i in self.get_images(): print(\" image: {}\".format(self.get_image(i).name)) for l", "print(\" classname: {}\".format(self.classname)) print(\" dims: {}\".format(self.dims)) print(\" flags: {}\".format(self.flags)) print(\"", "parameters for the CIS. \"\"\" # check for duplicates self.parameterlist.append([name,", "self.colormaps): self.colormaps[name] = colormap.colormap(path) def remove_colormap(self, name): \"\"\" Remove a", "image to the set of images in the CIS. \"\"\"", "from . import renderer from . import convert class cis:", "def get_image(self, key): \"\"\" Returns an image given its key.", "the list of parameters for the CIS. \"\"\" # check", "in self.variables: yield i def get_variable(self, name): \"\"\" Return a", "def get_images(self): \"\"\" Returns all images. \"\"\" for i in", "filename): \"\"\" The constructor. 
\"\"\" self.fname = filename self.classname =", "in self.get_images(): print(\" image: {}\".format(self.get_image(i).name)) for l in self.get_image(i).get_layers(): print(\"", "hold properties of a Composible Image Set. \"\"\" def __init__(self,", "self.images[name] = image.image(name) return self.images[name] def get_variables(self): \"\"\" Return all", "= self.variables[name] return variable def get_image(self,name): \"\"\" Return an image.", "print(\" image: {}\".format(self.get_image(i).name)) for l in self.get_image(i).get_layers(): print(\" layer: {}\".format(self.get_image(i).get_layer(l).name))", "layer: {}\".format(self.get_image(i).get_layer(l).name)) print(\"\\n\") def get_image(self, key): \"\"\" Returns an image", "#if colormap not in dict if (name not in self.colormaps):", "result = self.images[key] return result def get_images(self): \"\"\" Returns all", "variable = None if name in self.variables: variable = self.variables[name]", "of variables. \"\"\" # check for duplicates self.variables[name] = {'type':type,", "\"\"\" image = None if name in self.images: image =", "\"\"\" print(\"printing cis\") print(\" fname: {}\".format(self.fname)) print(\" classname: {}\".format(self.classname)) print(\"", "add_parameter(self, name, type): \"\"\" Add a parameter to the list", "of colormaps. \"\"\" #if colormap not in dict if (name", "from . import cisview from . import renderer from .", "Returns all images. \"\"\" for i in self.images: yield i", "Set. \"\"\" def __init__(self, filename): \"\"\" The constructor. \"\"\" self.fname", "if key in self.images: result = self.images[key] return result def", "self.colormaps: yield i def set_dims(self, w, h): \"\"\" Set the", "\"\"\" for i in self.colormaps: yield i def set_dims(self, w,", "def add_parameter(self, name, type): \"\"\" Add a parameter to the", "for the CIS. \"\"\" # check for duplicates self.parameterlist.append([name, type])", "Return an image. \"\"\" image = None if name in", "convert class cis: \"\"\"Composible Image Set Class The data structure", "to the set of colormaps. \"\"\" #if colormap not in", "type, min, max): \"\"\" Add a variable to the set", "\"\"\" result = False if key in self.images: result =", "None self.variables = {} self.images = {} self.colormaps = {}", "\"\"\" # check for duplicates self.variables[name] = {'type':type, 'min':min, 'max':max}", "Add an image to the set of images in the", "image = None if name in self.images: image = self.images[name]", "duplicates self.variables[name] = {'type':type, 'min':min, 'max':max} def add_image(self, name): \"\"\"", "\"\"\" Add a colormap to the set of colormaps. \"\"\"", "in self.variables: variable = self.variables[name] return variable def get_image(self,name): \"\"\"", "for i in self.colormaps: yield i def set_dims(self, w, h):", "given a width and height. \"\"\" self.dims = [w, h]", "dict if (name not in self.colormaps): self.colormaps[name] = colormap.colormap(path) def", "add_image(self, name): \"\"\" Add an image to the set of", "names. 
\"\"\" return list(self.images.keys()) def set_parameter_table(self, table): \"\"\" Set parameter", "\"\"\" # check for duplicates self.images[name] = image.image(name) return self.images[name]", "[0,0] self.flags = \"CONSTANT_CHANNELS\" self.version = \"1.0\" self.parameterlist = []", "{} self.images = {} self.colormaps = {} def debug_print(self): \"\"\"", "add_colormap(self, name, path): \"\"\" Add a colormap to the set", "image: {}\".format(self.get_image(i).name)) for l in self.get_image(i).get_layers(): print(\" layer: {}\".format(self.get_image(i).get_layer(l).name)) print(\"\\n\")", "max): \"\"\" Add a variable to the set of variables.", "variable = self.variables[name] return variable def get_image(self,name): \"\"\" Return an", "in self.get_image(i).get_layers(): print(\" layer: {}\".format(self.get_image(i).get_layer(l).name)) print(\"\\n\") def get_image(self, key): \"\"\"", "<reponame>cinemascience/cinemasc from . import imageview from . import cisview from", "False if key in self.images: result = self.images[key] return result", "CIS properties. \"\"\" print(\"printing cis\") print(\" fname: {}\".format(self.fname)) print(\" classname:", "\"\"\" self.fname = filename self.classname = \"COMPOSABLE_IMAGE_SET\" self.dims = [0,0]", "an image to the set of images in the CIS.", "key. \"\"\" result = False if key in self.images: result", "if name in self.images: image = self.images[name] return image def", "colormap.colormap(path) def remove_colormap(self, name): \"\"\" Remove a colormap from the", "a variable. \"\"\" variable = None if name in self.variables:", "cis\") print(\" fname: {}\".format(self.fname)) print(\" classname: {}\".format(self.classname)) print(\" dims: {}\".format(self.dims))", "image.image(name) return self.images[name] def get_variables(self): \"\"\" Return all variables. \"\"\"", "colormap = self.colormaps[name] return colormap def add_colormap(self, name, path): \"\"\"", "the CIS. \"\"\" # check for duplicates self.parameterlist.append([name, type]) def", "\"1.0\" self.parameterlist = [] self.parametertable = None self.variables = {}", "(name not in self.colormaps): self.colormaps[name] = colormap.colormap(path) def remove_colormap(self, name):", "image given its key. \"\"\" result = False if key", "self.variables[name] = {'type':type, 'min':min, 'max':max} def add_image(self, name): \"\"\" Add", "in self.images: yield i def get_image_names(self): \"\"\" Returns list of", "in self.colormaps: yield i def set_dims(self, w, h): \"\"\" Set", "print(\" fname: {}\".format(self.fname)) print(\" classname: {}\".format(self.classname)) print(\" dims: {}\".format(self.dims)) print(\"", "the set of colormaps. \"\"\" self.colormaps.pop(name) def get_colormaps(self): \"\"\" Return", "Returns an image given its key. \"\"\" result = False", "Returns list of image names. \"\"\" return list(self.images.keys()) def set_parameter_table(self,", "flags: {}\".format(self.flags)) print(\" version: {}\".format(self.version)) print(\" colormaps: \") for m", "properties of a Composible Image Set. \"\"\" def __init__(self, filename):", "= {} def debug_print(self): \"\"\" Debug print statement for CIS", "import renderer from . 
import convert class cis: \"\"\"Composible Image", "list(self.images.keys()) def set_parameter_table(self, table): \"\"\" Set parameter table using a", "= colormap.colormap(path) def remove_colormap(self, name): \"\"\" Remove a colormap from", "in self.colormaps): self.colormaps[name] = colormap.colormap(path) def remove_colormap(self, name): \"\"\" Remove", "def add_image(self, name): \"\"\" Add an image to the set", "'max':max} def add_image(self, name): \"\"\" Add an image to the", "= {} self.images = {} self.colormaps = {} def debug_print(self):", "{}\".format(self.version)) print(\" colormaps: \") for m in self.colormaps: print(m) for", "to the list of parameters for the CIS. \"\"\" #", "self.fname = filename self.classname = \"COMPOSABLE_IMAGE_SET\" self.dims = [0,0] self.flags", "import cisview from . import renderer from . import convert", "key in self.images: result = self.images[key] return result def get_images(self):", "from the set of colormaps. \"\"\" self.colormaps.pop(name) def get_colormaps(self): \"\"\"", "if (name not in self.colormaps): self.colormaps[name] = colormap.colormap(path) def remove_colormap(self,", "\"\"\"Composible Image Set Class The data structure to hold properties", "self.classname = \"COMPOSABLE_IMAGE_SET\" self.dims = [0,0] self.flags = \"CONSTANT_CHANNELS\" self.version", "of parameters for the CIS. \"\"\" # check for duplicates", "\"\"\" Add a variable to the set of variables. \"\"\"", "\"\"\" variable = None if name in self.variables: variable =", "= self.images[name] return image def get_colormap(self,name): \"\"\" Return a colormap.", "colormaps. \"\"\" for i in self.colormaps: yield i def set_dims(self,", "fname: {}\".format(self.fname)) print(\" classname: {}\".format(self.classname)) print(\" dims: {}\".format(self.dims)) print(\" flags:", "min, max): \"\"\" Add a variable to the set of", "Remove a colormap from the set of colormaps. \"\"\" self.colormaps.pop(name)", "\"COMPOSABLE_IMAGE_SET\" self.dims = [0,0] self.flags = \"CONSTANT_CHANNELS\" self.version = \"1.0\"", "def get_colormaps(self): \"\"\" Return all colormaps. \"\"\" for i in", "return image def get_colormap(self,name): \"\"\" Return a colormap. \"\"\" colormap", "add_variable(self, name, type, min, max): \"\"\" Add a variable to", "return list(self.images.keys()) def set_parameter_table(self, table): \"\"\" Set parameter table using", "get_images(self): \"\"\" Returns all images. \"\"\" for i in self.images:", "colormaps. \"\"\" self.colormaps.pop(name) def get_colormaps(self): \"\"\" Return all colormaps. \"\"\"", "Add a variable to the set of variables. \"\"\" #", "get_colormaps(self): \"\"\" Return all colormaps. \"\"\" for i in self.colormaps:", "colormaps. \"\"\" #if colormap not in dict if (name not", "set_dims(self, w, h): \"\"\" Set the dimensions of the CIS", "'min':min, 'max':max} def add_image(self, name): \"\"\" Add an image to", "def get_image(self,name): \"\"\" Return an image. \"\"\" image = None", "# check for duplicates self.parameterlist.append([name, type]) def add_variable(self, name, type,", "set of variables. \"\"\" # check for duplicates self.variables[name] =", "m in self.colormaps: print(m) for i in self.get_images(): print(\" image:", "colormap from the set of colormaps. \"\"\" self.colormaps.pop(name) def get_colormaps(self):", "= None if name in self.images: image = self.images[name] return", "Composible Image Set. \"\"\" def __init__(self, filename): \"\"\" The constructor.", "copy. 
\"\"\" self.parametertable = table.copy(deep=True) def add_parameter(self, name, type): \"\"\"", "print(\" colormaps: \") for m in self.colormaps: print(m) for i", "to the set of images in the CIS. \"\"\" #", "None if name in self.variables: variable = self.variables[name] return variable", "print(\" version: {}\".format(self.version)) print(\" colormaps: \") for m in self.colormaps:", "\"\"\" Return a variable. \"\"\" variable = None if name", "structure to hold properties of a Composible Image Set. \"\"\"", "\"\"\" The constructor. \"\"\" self.fname = filename self.classname = \"COMPOSABLE_IMAGE_SET\"", "properties. \"\"\" print(\"printing cis\") print(\" fname: {}\".format(self.fname)) print(\" classname: {}\".format(self.classname))", "table using a deep copy. \"\"\" self.parametertable = table.copy(deep=True) def", "yield i def get_variable(self, name): \"\"\" Return a variable. \"\"\"", "if name in self.variables: variable = self.variables[name] return variable def", "= self.images[key] return result def get_images(self): \"\"\" Returns all images.", "for duplicates self.variables[name] = {'type':type, 'min':min, 'max':max} def add_image(self, name):", "in dict if (name not in self.colormaps): self.colormaps[name] = colormap.colormap(path)", "= self.colormaps[name] return colormap def add_colormap(self, name, path): \"\"\" Add", "\"\"\" Returns all images. \"\"\" for i in self.images: yield", "\"\"\" Debug print statement for CIS properties. \"\"\" print(\"printing cis\")", "{}\".format(self.dims)) print(\" flags: {}\".format(self.flags)) print(\" version: {}\".format(self.version)) print(\" colormaps: \")", "debug_print(self): \"\"\" Debug print statement for CIS properties. \"\"\" print(\"printing", "\"\"\" Set the dimensions of the CIS given a width", "in self.colormaps: print(m) for i in self.get_images(): print(\" image: {}\".format(self.get_image(i).name))", "i def get_variable(self, name): \"\"\" Return a variable. \"\"\" variable", "name): \"\"\" Return a variable. \"\"\" variable = None if", "__init__(self, filename): \"\"\" The constructor. \"\"\" self.fname = filename self.classname", "duplicates self.images[name] = image.image(name) return self.images[name] def get_variables(self): \"\"\" Return", "version: {}\".format(self.version)) print(\" colormaps: \") for m in self.colormaps: print(m)", "self.images[name] def get_variables(self): \"\"\" Return all variables. \"\"\" for i", "import imageview from . import cisview from . import renderer", "\"\"\" for i in self.images: yield i def get_image_names(self): \"\"\"", "of a Composible Image Set. \"\"\" def __init__(self, filename): \"\"\"", "i def get_image_names(self): \"\"\" Returns list of image names. \"\"\"", "for i in self.variables: yield i def get_variable(self, name): \"\"\"", "\"\"\" for i in self.variables: yield i def get_variable(self, name):", "variable to the set of variables. \"\"\" # check for", "self.parametertable = table.copy(deep=True) def add_parameter(self, name, type): \"\"\" Add a", "Return all colormaps. \"\"\" for i in self.colormaps: yield i", "of images in the CIS. \"\"\" # check for duplicates", "to the set of variables. 
\"\"\" # check for duplicates", "= \"COMPOSABLE_IMAGE_SET\" self.dims = [0,0] self.flags = \"CONSTANT_CHANNELS\" self.version =", "{}\".format(self.get_image(i).name)) for l in self.get_image(i).get_layers(): print(\" layer: {}\".format(self.get_image(i).get_layer(l).name)) print(\"\\n\") def", "self.colormaps: colormap = self.colormaps[name] return colormap def add_colormap(self, name, path):", "colormap def add_colormap(self, name, path): \"\"\" Add a colormap to", "table.copy(deep=True) def add_parameter(self, name, type): \"\"\" Add a parameter to", "check for duplicates self.parameterlist.append([name, type]) def add_variable(self, name, type, min,", "self.parameterlist.append([name, type]) def add_variable(self, name, type, min, max): \"\"\" Add", "self.colormaps[name] return colormap def add_colormap(self, name, path): \"\"\" Add a", "the set of colormaps. \"\"\" #if colormap not in dict", "Image Set. \"\"\" def __init__(self, filename): \"\"\" The constructor. \"\"\"", "= image.image(name) return self.images[name] def get_variables(self): \"\"\" Return all variables.", "cis: \"\"\"Composible Image Set Class The data structure to hold", "\"\"\" self.colormaps.pop(name) def get_colormaps(self): \"\"\" Return all colormaps. \"\"\" for", "name in self.variables: variable = self.variables[name] return variable def get_image(self,name):", "= [] self.parametertable = None self.variables = {} self.images =", "\"\"\" def __init__(self, filename): \"\"\" The constructor. \"\"\" self.fname =", "\"\"\" return list(self.images.keys()) def set_parameter_table(self, table): \"\"\" Set parameter table", "i in self.get_images(): print(\" image: {}\".format(self.get_image(i).name)) for l in self.get_image(i).get_layers():", "an image given its key. \"\"\" result = False if", "self.flags = \"CONSTANT_CHANNELS\" self.version = \"1.0\" self.parameterlist = [] self.parametertable", "def add_colormap(self, name, path): \"\"\" Add a colormap to the", "\"\"\" Returns an image given its key. \"\"\" result =", "h): \"\"\" Set the dimensions of the CIS given a", "{} self.colormaps = {} def debug_print(self): \"\"\" Debug print statement", "from . import convert class cis: \"\"\"Composible Image Set Class", "Set the dimensions of the CIS given a width and", "\"\"\" Return a colormap. \"\"\" colormap = None if name", "the CIS. \"\"\" # check for duplicates self.images[name] = image.image(name)", "imageview from . import cisview from . import renderer from", "given its key. \"\"\" result = False if key in", "get_variables(self): \"\"\" Return all variables. \"\"\" for i in self.variables:", "The data structure to hold properties of a Composible Image", "self.dims = [0,0] self.flags = \"CONSTANT_CHANNELS\" self.version = \"1.0\" self.parameterlist", "= \"1.0\" self.parameterlist = [] self.parametertable = None self.variables =", "type]) def add_variable(self, name, type, min, max): \"\"\" Add a", "self.colormaps[name] = colormap.colormap(path) def remove_colormap(self, name): \"\"\" Remove a colormap", "in self.colormaps: colormap = self.colormaps[name] return colormap def add_colormap(self, name,", "print statement for CIS properties. \"\"\" print(\"printing cis\") print(\" fname:", "colormap not in dict if (name not in self.colormaps): self.colormaps[name]", "list of image names. 
\"\"\" return list(self.images.keys()) def set_parameter_table(self, table):", "for i in self.images: yield i def get_image_names(self): \"\"\" Returns", "the dimensions of the CIS given a width and height.", "self.variables = {} self.images = {} self.colormaps = {} def", "Add a parameter to the list of parameters for the", "a variable to the set of variables. \"\"\" # check", "self.images = {} self.colormaps = {} def debug_print(self): \"\"\" Debug", "to hold properties of a Composible Image Set. \"\"\" def", "of the CIS given a width and height. \"\"\" self.dims", "self.images: image = self.images[name] return image def get_colormap(self,name): \"\"\" Return", "for i in self.get_images(): print(\" image: {}\".format(self.get_image(i).name)) for l in", "self.version = \"1.0\" self.parameterlist = [] self.parametertable = None self.variables", "image names. \"\"\" return list(self.images.keys()) def set_parameter_table(self, table): \"\"\" Set", "class cis: \"\"\"Composible Image Set Class The data structure to", "print(\" flags: {}\".format(self.flags)) print(\" version: {}\".format(self.version)) print(\" colormaps: \") for", "Image Set Class The data structure to hold properties of", "using a deep copy. \"\"\" self.parametertable = table.copy(deep=True) def add_parameter(self,", "name, type): \"\"\" Add a parameter to the list of", "of colormaps. \"\"\" self.colormaps.pop(name) def get_colormaps(self): \"\"\" Return all colormaps.", "name in self.colormaps: colormap = self.colormaps[name] return colormap def add_colormap(self,", "variables. \"\"\" for i in self.variables: yield i def get_variable(self,", "self.images[key] return result def get_images(self): \"\"\" Returns all images. \"\"\"", "type): \"\"\" Add a parameter to the list of parameters", "= filename self.classname = \"COMPOSABLE_IMAGE_SET\" self.dims = [0,0] self.flags =", "path): \"\"\" Add a colormap to the set of colormaps.", "name): \"\"\" Remove a colormap from the set of colormaps.", "# check for duplicates self.variables[name] = {'type':type, 'min':min, 'max':max} def", "for duplicates self.images[name] = image.image(name) return self.images[name] def get_variables(self): \"\"\"", "for duplicates self.parameterlist.append([name, type]) def add_variable(self, name, type, min, max):", "i in self.colormaps: yield i def set_dims(self, w, h): \"\"\"", "images. \"\"\" for i in self.images: yield i def get_image_names(self):", "get_colormap(self,name): \"\"\" Return a colormap. \"\"\" colormap = None if", "name in self.images: image = self.images[name] return image def get_colormap(self,name):", "all images. \"\"\" for i in self.images: yield i def", "\"\"\" Remove a colormap from the set of colormaps. \"\"\"", "{}\".format(self.classname)) print(\" dims: {}\".format(self.dims)) print(\" flags: {}\".format(self.flags)) print(\" version: {}\".format(self.version))", "= None if name in self.variables: variable = self.variables[name] return", "= \"CONSTANT_CHANNELS\" self.version = \"1.0\" self.parameterlist = [] self.parametertable =", "self.variables[name] return variable def get_image(self,name): \"\"\" Return an image. 
\"\"\"", "print(\" dims: {}\".format(self.dims)) print(\" flags: {}\".format(self.flags)) print(\" version: {}\".format(self.version)) print(\"", "None if name in self.colormaps: colormap = self.colormaps[name] return colormap", "\"CONSTANT_CHANNELS\" self.version = \"1.0\" self.parameterlist = [] self.parametertable = None", "\"\"\" #if colormap not in dict if (name not in", "l in self.get_image(i).get_layers(): print(\" layer: {}\".format(self.get_image(i).get_layer(l).name)) print(\"\\n\") def get_image(self, key):", "an image. \"\"\" image = None if name in self.images:", "def get_image_names(self): \"\"\" Returns list of image names. \"\"\" return", "statement for CIS properties. \"\"\" print(\"printing cis\") print(\" fname: {}\".format(self.fname))", "\"\"\" # check for duplicates self.parameterlist.append([name, type]) def add_variable(self, name,", "def __init__(self, filename): \"\"\" The constructor. \"\"\" self.fname = filename", "\"\"\" Return all variables. \"\"\" for i in self.variables: yield", "not in self.colormaps): self.colormaps[name] = colormap.colormap(path) def remove_colormap(self, name): \"\"\"", "a colormap from the set of colormaps. \"\"\" self.colormaps.pop(name) def", "data structure to hold properties of a Composible Image Set.", "i def set_dims(self, w, h): \"\"\" Set the dimensions of", "{}\".format(self.get_image(i).get_layer(l).name)) print(\"\\n\") def get_image(self, key): \"\"\" Returns an image given", "self.get_image(i).get_layers(): print(\" layer: {}\".format(self.get_image(i).get_layer(l).name)) print(\"\\n\") def get_image(self, key): \"\"\" Returns", "colormap. \"\"\" colormap = None if name in self.colormaps: colormap", "= [0,0] self.flags = \"CONSTANT_CHANNELS\" self.version = \"1.0\" self.parameterlist =", "return result def get_images(self): \"\"\" Returns all images. \"\"\" for", "self.images: yield i def get_image_names(self): \"\"\" Returns list of image", "= table.copy(deep=True) def add_parameter(self, name, type): \"\"\" Add a parameter", "def debug_print(self): \"\"\" Debug print statement for CIS properties. \"\"\"", "set of colormaps. \"\"\" self.colormaps.pop(name) def get_colormaps(self): \"\"\" Return all", "Return a colormap. \"\"\" colormap = None if name in", "name, path): \"\"\" Add a colormap to the set of", "w, h): \"\"\" Set the dimensions of the CIS given", "[] self.parametertable = None self.variables = {} self.images = {}", "get_image_names(self): \"\"\" Returns list of image names. \"\"\" return list(self.images.keys())", "in the CIS. \"\"\" # check for duplicates self.images[name] =", "the set of images in the CIS. \"\"\" # check", "image = self.images[name] return image def get_colormap(self,name): \"\"\" Return a", "for CIS properties. \"\"\" print(\"printing cis\") print(\" fname: {}\".format(self.fname)) print(\"", ". import convert class cis: \"\"\"Composible Image Set Class The", "= {'type':type, 'min':min, 'max':max} def add_image(self, name): \"\"\" Add an", "get_image(self,name): \"\"\" Return an image. \"\"\" image = None if", "The constructor. \"\"\" self.fname = filename self.classname = \"COMPOSABLE_IMAGE_SET\" self.dims", ". import cisview from . import renderer from . import", "self.images: result = self.images[key] return result def get_images(self): \"\"\" Returns", "self.variables: yield i def get_variable(self, name): \"\"\" Return a variable.", "def add_variable(self, name, type, min, max): \"\"\" Add a variable", ". import renderer from . 
import convert class cis: \"\"\"Composible", "colormaps: \") for m in self.colormaps: print(m) for i in", "self.colormaps = {} def debug_print(self): \"\"\" Debug print statement for", "def get_variable(self, name): \"\"\" Return a variable. \"\"\" variable =", "a colormap. \"\"\" colormap = None if name in self.colormaps:", "yield i def get_image_names(self): \"\"\" Returns list of image names.", "in self.images: image = self.images[name] return image def get_colormap(self,name): \"\"\"", "Set Class The data structure to hold properties of a", "check for duplicates self.images[name] = image.image(name) return self.images[name] def get_variables(self):", ". import imageview from . import cisview from . import", "of image names. \"\"\" return list(self.images.keys()) def set_parameter_table(self, table): \"\"\"", "result def get_images(self): \"\"\" Returns all images. \"\"\" for i", "deep copy. \"\"\" self.parametertable = table.copy(deep=True) def add_parameter(self, name, type):", "\"\"\" Returns list of image names. \"\"\" return list(self.images.keys()) def", "= None if name in self.colormaps: colormap = self.colormaps[name] return", "a Composible Image Set. \"\"\" def __init__(self, filename): \"\"\" The", "get_variable(self, name): \"\"\" Return a variable. \"\"\" variable = None", "print(\"printing cis\") print(\" fname: {}\".format(self.fname)) print(\" classname: {}\".format(self.classname)) print(\" dims:", "print(\" layer: {}\".format(self.get_image(i).get_layer(l).name)) print(\"\\n\") def get_image(self, key): \"\"\" Returns an", "i in self.images: yield i def get_image_names(self): \"\"\" Returns list", "images in the CIS. \"\"\" # check for duplicates self.images[name]", "result = False if key in self.images: result = self.images[key]", "the CIS given a width and height. \"\"\" self.dims =", "parameter to the list of parameters for the CIS. \"\"\"", "\"\"\" Add an image to the set of images in", "variables. \"\"\" # check for duplicates self.variables[name] = {'type':type, 'min':min,", "Return a variable. \"\"\" variable = None if name in", "{}\".format(self.flags)) print(\" version: {}\".format(self.version)) print(\" colormaps: \") for m in", "Set parameter table using a deep copy. \"\"\" self.parametertable =", "\") for m in self.colormaps: print(m) for i in self.get_images():", "from . import imageview from . import cisview from .", "{} def debug_print(self): \"\"\" Debug print statement for CIS properties.", "\"\"\" Add a parameter to the list of parameters for", "\"\"\" Set parameter table using a deep copy. \"\"\" self.parametertable", "Class The data structure to hold properties of a Composible", "all colormaps. \"\"\" for i in self.colormaps: yield i def", "self.colormaps.pop(name) def get_colormaps(self): \"\"\" Return all colormaps. \"\"\" for i", "variable def get_image(self,name): \"\"\" Return an image. \"\"\" image =", "\"\"\" Return an image. \"\"\" image = None if name", "self.get_images(): print(\" image: {}\".format(self.get_image(i).name)) for l in self.get_image(i).get_layers(): print(\" layer:", "\"\"\" self.parametertable = table.copy(deep=True) def add_parameter(self, name, type): \"\"\" Add", "{}\".format(self.fname)) print(\" classname: {}\".format(self.classname)) print(\" dims: {}\".format(self.dims)) print(\" flags: {}\".format(self.flags))", "None if name in self.images: image = self.images[name] return image", "name, type, min, max): \"\"\" Add a variable to the", "CIS. 
\"\"\" # check for duplicates self.parameterlist.append([name, type]) def add_variable(self,", "table): \"\"\" Set parameter table using a deep copy. \"\"\"", "self.parameterlist = [] self.parametertable = None self.variables = {} self.images", "a colormap to the set of colormaps. \"\"\" #if colormap", "parameter table using a deep copy. \"\"\" self.parametertable = table.copy(deep=True)", "self.colormaps: print(m) for i in self.get_images(): print(\" image: {}\".format(self.get_image(i).name)) for", "cisview from . import renderer from . import convert class", "name): \"\"\" Add an image to the set of images", "image def get_colormap(self,name): \"\"\" Return a colormap. \"\"\" colormap =", "filename self.classname = \"COMPOSABLE_IMAGE_SET\" self.dims = [0,0] self.flags = \"CONSTANT_CHANNELS\"", "not in dict if (name not in self.colormaps): self.colormaps[name] =", "= None self.variables = {} self.images = {} self.colormaps =", "colormap to the set of colormaps. \"\"\" #if colormap not", "get_image(self, key): \"\"\" Returns an image given its key. \"\"\"", "a parameter to the list of parameters for the CIS.", "a deep copy. \"\"\" self.parametertable = table.copy(deep=True) def add_parameter(self, name,", "key): \"\"\" Returns an image given its key. \"\"\" result", "return colormap def add_colormap(self, name, path): \"\"\" Add a colormap", "self.variables: variable = self.variables[name] return variable def get_image(self,name): \"\"\" Return", "def get_variables(self): \"\"\" Return all variables. \"\"\" for i in", "the set of variables. \"\"\" # check for duplicates self.variables[name]", "all variables. \"\"\" for i in self.variables: yield i def", "import convert class cis: \"\"\"Composible Image Set Class The data", "i in self.variables: yield i def get_variable(self, name): \"\"\" Return", "set_parameter_table(self, table): \"\"\" Set parameter table using a deep copy.", "self.images[name] return image def get_colormap(self,name): \"\"\" Return a colormap. \"\"\"", "for l in self.get_image(i).get_layers(): print(\" layer: {}\".format(self.get_image(i).get_layer(l).name)) print(\"\\n\") def get_image(self,", "Return all variables. \"\"\" for i in self.variables: yield i", "its key. \"\"\" result = False if key in self.images:", "def get_colormap(self,name): \"\"\" Return a colormap. \"\"\" colormap = None", "classname: {}\".format(self.classname)) print(\" dims: {}\".format(self.dims)) print(\" flags: {}\".format(self.flags)) print(\" version:" ]
[ "Spaghetti using Diffusion Imaging in Python (dipy.org) and Free On", "labeler import TrackLabeler from fos.actor.slicer import Slicer #dipy modules from", "the world to the window wi.attach(w) #create a manager which", "wi.attach(w) #create a manager which can handle multiple windows wm", "== '__main__': subject = 5 seeds = 1 qb_dist =", "dpr.close() #load initial QuickBundles with threshold 30mm fpkl = 'data/subj_'+(\"%02d\"", "using Diffusion Imaging in Python (dipy.org) and Free On Shades", "= True #pyglet.options['debug_x11'] = True #pyglet.options['debug_gl_trace'] = True #pyglet.options['debug_texture'] =", "subject = 5 seeds = 1 qb_dist = 30 #load", "the actors to the world w=World() w.add(tl) w.add(sl) #w.add(ax) #create", "the world w=World() w.add(tl) w.add(sl) #w.add(ax) #create a window wi", "with threshold 30mm fpkl = 'data/subj_'+(\"%02d\" % subject)+'/101_32/DTI/qb_gqi_'+str(seeds)+'M_linear_'+str(qb_dist)+'.pkl' #qb=QuickBundles(T,30.,12) qb=load_pickle(fpkl)", "from dipy.io.pickles import load_pickle,save_pickle from dipy.viz.colormap import orient2rgb import copy", "#load T1 volume registered in MNI space img = nib.load('data/subj_'+(\"%02d\"", "space img = nib.load('data/subj_'+(\"%02d\" % subject)+'/MPRAGE_32/T1_flirt_out.nii.gz') data = img.get_data() affine", "threshold 30mm fpkl = 'data/subj_'+(\"%02d\" % subject)+'/101_32/DTI/qb_gqi_'+str(seeds)+'M_linear_'+str(qb_dist)+'.pkl' #qb=QuickBundles(T,30.,12) qb=load_pickle(fpkl) #create", "ax = Axes(100) x,y,z=data.shape #add the actors to the world", "sl tl.slicer=sl #OpenGL coordinate system axes ax = Axes(100) x,y,z=data.shape", "data = img.get_data() affine = img.get_affine() #load the tracks registered", "#qb=QuickBundles(T,30.,12) qb=load_pickle(fpkl) #create the interaction system for tracks tl =", "#OpenGL coordinate system axes ax = Axes(100) x,y,z=data.shape #add the", "slicing/masking tool sl = Slicer(affine,data) #add one way communication between", "from fos.actor.axes import Axes from fos import World, Window, WindowManager", "QuickBundles from dipy.io.dpy import Dpy from dipy.io.pickles import load_pickle,save_pickle from", "import World, Window, WindowManager from labeler import TrackLabeler from fos.actor.slicer", "#add one way communication between tl and sl tl.slicer=sl #OpenGL", "<gh_stars>1-10 import numpy as np import nibabel as nib import", "= 'data/subj_'+(\"%02d\" % subject)+'/101_32/DTI/tracks_gqi_'+str(seeds)+'M_linear.dpy' dpr = Dpy(fdpyw, 'r') T =", "from fos.actor.slicer import Slicer #dipy modules from dipy.segment.quickbundles import QuickBundles", "system axes ax = Axes(100) x,y,z=data.shape #add the actors to", "Slicer #dipy modules from dipy.segment.quickbundles import QuickBundles from dipy.io.dpy import", "#pyglet.options['debug_gl'] = True #pyglet.options['debug_x11'] = True #pyglet.options['debug_gl_trace'] = True #pyglet.options['debug_texture']", "import os.path as op import pyglet #pyglet.options['debug_gl'] = True #pyglet.options['debug_x11']", "import Slicer #dipy modules from dipy.segment.quickbundles import QuickBundles from dipy.io.dpy", "import copy if __name__ == '__main__': subject = 5 seeds", "= True #pyglet.options['debug_texture'] = True #fos modules from fos.actor.axes import", "w=World() w.add(tl) w.add(sl) #w.add(ax) #create a window wi = Window(caption=\"Interactive", "dpr = Dpy(fdpyw, 'r') T = dpr.read_tracks() dpr.close() #load initial", "from dipy.segment.quickbundles import QuickBundles from dipy.io.dpy import Dpy from dipy.io.pickles", "World, Window, 
WindowManager from labeler import TrackLabeler from fos.actor.slicer import", "between tl and sl tl.slicer=sl #OpenGL coordinate system axes ax", "True #pyglet.options['debug_texture'] = True #fos modules from fos.actor.axes import Axes", "(dipy.org) and Free On Shades (fos.me)\",\\ bgcolor=(0.3,0.3,0.6,1),width=1200,height=800) #attach the world", "tool sl = Slicer(affine,data) #add one way communication between tl", "#create the interaction system for tracks tl = TrackLabeler(qb,qb.downsampled_tracks(),vol_shape=data.shape,tracks_alpha=1) #add", "#pyglet.options['debug_x11'] = True #pyglet.options['debug_gl_trace'] = True #pyglet.options['debug_texture'] = True #fos", "Imaging in Python (dipy.org) and Free On Shades (fos.me)\",\\ bgcolor=(0.3,0.3,0.6,1),width=1200,height=800)", "% subject)+'/MPRAGE_32/T1_flirt_out.nii.gz') data = img.get_data() affine = img.get_affine() #load the", "if __name__ == '__main__': subject = 5 seeds = 1", "Python (dipy.org) and Free On Shades (fos.me)\",\\ bgcolor=(0.3,0.3,0.6,1),width=1200,height=800) #attach the", "#fos modules from fos.actor.axes import Axes from fos import World,", "tracks registered in MNI space fdpyw = 'data/subj_'+(\"%02d\" % subject)+'/101_32/DTI/tracks_gqi_'+str(seeds)+'M_linear.dpy'", "T = dpr.read_tracks() dpr.close() #load initial QuickBundles with threshold 30mm", "qb_dist = 30 #load T1 volume registered in MNI space", "TrackLabeler(qb,qb.downsampled_tracks(),vol_shape=data.shape,tracks_alpha=1) #add a interactive slicing/masking tool sl = Slicer(affine,data) #add", "True #pyglet.options['debug_x11'] = True #pyglet.options['debug_gl_trace'] = True #pyglet.options['debug_texture'] = True", "Window, WindowManager from labeler import TrackLabeler from fos.actor.slicer import Slicer", "= True #fos modules from fos.actor.axes import Axes from fos", "= img.get_affine() #load the tracks registered in MNI space fdpyw", "Slicer(affine,data) #add one way communication between tl and sl tl.slicer=sl", "a interactive slicing/masking tool sl = Slicer(affine,data) #add one way", "5 seeds = 1 qb_dist = 30 #load T1 volume", "world w=World() w.add(tl) w.add(sl) #w.add(ax) #create a window wi =", "Dpy from dipy.io.pickles import load_pickle,save_pickle from dipy.viz.colormap import orient2rgb import", "import pyglet #pyglet.options['debug_gl'] = True #pyglet.options['debug_x11'] = True #pyglet.options['debug_gl_trace'] =", "Window(caption=\"Interactive Spaghetti using Diffusion Imaging in Python (dipy.org) and Free", "np import nibabel as nib import os.path as op import", "actors to the world w=World() w.add(tl) w.add(sl) #w.add(ax) #create a", "pyglet #pyglet.options['debug_gl'] = True #pyglet.options['debug_x11'] = True #pyglet.options['debug_gl_trace'] = True", "#pyglet.options['debug_texture'] = True #fos modules from fos.actor.axes import Axes from", "'r') T = dpr.read_tracks() dpr.close() #load initial QuickBundles with threshold", "tl and sl tl.slicer=sl #OpenGL coordinate system axes ax =", "can handle multiple windows wm = WindowManager() wm.add(wi) wm.run() print('Everything", "#add the actors to the world w=World() w.add(tl) w.add(sl) #w.add(ax)", "as nib import os.path as op import pyglet #pyglet.options['debug_gl'] =", "interaction system for tracks tl = TrackLabeler(qb,qb.downsampled_tracks(),vol_shape=data.shape,tracks_alpha=1) #add a interactive", "(fos.me)\",\\ bgcolor=(0.3,0.3,0.6,1),width=1200,height=800) #attach the world to the window wi.attach(w) #create", "'data/subj_'+(\"%02d\" % 
subject)+'/101_32/DTI/qb_gqi_'+str(seeds)+'M_linear_'+str(qb_dist)+'.pkl' #qb=QuickBundles(T,30.,12) qb=load_pickle(fpkl) #create the interaction system for", "tl = TrackLabeler(qb,qb.downsampled_tracks(),vol_shape=data.shape,tracks_alpha=1) #add a interactive slicing/masking tool sl =", "manager which can handle multiple windows wm = WindowManager() wm.add(wi)", "Dpy(fdpyw, 'r') T = dpr.read_tracks() dpr.close() #load initial QuickBundles with", "x,y,z=data.shape #add the actors to the world w=World() w.add(tl) w.add(sl)", "the window wi.attach(w) #create a manager which can handle multiple", "which can handle multiple windows wm = WindowManager() wm.add(wi) wm.run()", "nibabel as nib import os.path as op import pyglet #pyglet.options['debug_gl']", "subject)+'/MPRAGE_32/T1_flirt_out.nii.gz') data = img.get_data() affine = img.get_affine() #load the tracks", "from dipy.viz.colormap import orient2rgb import copy if __name__ == '__main__':", "initial QuickBundles with threshold 30mm fpkl = 'data/subj_'+(\"%02d\" % subject)+'/101_32/DTI/qb_gqi_'+str(seeds)+'M_linear_'+str(qb_dist)+'.pkl'", "#add a interactive slicing/masking tool sl = Slicer(affine,data) #add one", "way communication between tl and sl tl.slicer=sl #OpenGL coordinate system", "Free On Shades (fos.me)\",\\ bgcolor=(0.3,0.3,0.6,1),width=1200,height=800) #attach the world to the", "affine = img.get_affine() #load the tracks registered in MNI space", "subject)+'/101_32/DTI/tracks_gqi_'+str(seeds)+'M_linear.dpy' dpr = Dpy(fdpyw, 'r') T = dpr.read_tracks() dpr.close() #load", "30mm fpkl = 'data/subj_'+(\"%02d\" % subject)+'/101_32/DTI/qb_gqi_'+str(seeds)+'M_linear_'+str(qb_dist)+'.pkl' #qb=QuickBundles(T,30.,12) qb=load_pickle(fpkl) #create the", "= 30 #load T1 volume registered in MNI space img", "MNI space img = nib.load('data/subj_'+(\"%02d\" % subject)+'/MPRAGE_32/T1_flirt_out.nii.gz') data = img.get_data()", "seeds = 1 qb_dist = 30 #load T1 volume registered", "import Dpy from dipy.io.pickles import load_pickle,save_pickle from dipy.viz.colormap import orient2rgb", "__name__ == '__main__': subject = 5 seeds = 1 qb_dist", "wi = Window(caption=\"Interactive Spaghetti using Diffusion Imaging in Python (dipy.org)", "modules from dipy.segment.quickbundles import QuickBundles from dipy.io.dpy import Dpy from", "'data/subj_'+(\"%02d\" % subject)+'/101_32/DTI/tracks_gqi_'+str(seeds)+'M_linear.dpy' dpr = Dpy(fdpyw, 'r') T = dpr.read_tracks()", "Axes(100) x,y,z=data.shape #add the actors to the world w=World() w.add(tl)", "in MNI space img = nib.load('data/subj_'+(\"%02d\" % subject)+'/MPRAGE_32/T1_flirt_out.nii.gz') data =", "% subject)+'/101_32/DTI/qb_gqi_'+str(seeds)+'M_linear_'+str(qb_dist)+'.pkl' #qb=QuickBundles(T,30.,12) qb=load_pickle(fpkl) #create the interaction system for tracks", "= Dpy(fdpyw, 'r') T = dpr.read_tracks() dpr.close() #load initial QuickBundles", "fos.actor.slicer import Slicer #dipy modules from dipy.segment.quickbundles import QuickBundles from", "sl = Slicer(affine,data) #add one way communication between tl and", "img = nib.load('data/subj_'+(\"%02d\" % subject)+'/MPRAGE_32/T1_flirt_out.nii.gz') data = img.get_data() affine =", "= Axes(100) x,y,z=data.shape #add the actors to the world w=World()", "coordinate system axes ax = Axes(100) x,y,z=data.shape #add the actors", "and Free On Shades (fos.me)\",\\ bgcolor=(0.3,0.3,0.6,1),width=1200,height=800) #attach the world to", "#load initial QuickBundles with threshold 30mm fpkl = 'data/subj_'+(\"%02d\" %", "dipy.segment.quickbundles import QuickBundles from 
dipy.io.dpy import Dpy from dipy.io.pickles import", "bgcolor=(0.3,0.3,0.6,1),width=1200,height=800) #attach the world to the window wi.attach(w) #create a", "#load the tracks registered in MNI space fdpyw = 'data/subj_'+(\"%02d\"", "the tracks registered in MNI space fdpyw = 'data/subj_'+(\"%02d\" %", "import Axes from fos import World, Window, WindowManager from labeler", "one way communication between tl and sl tl.slicer=sl #OpenGL coordinate", "multiple windows wm = WindowManager() wm.add(wi) wm.run() print('Everything is running", "QuickBundles with threshold 30mm fpkl = 'data/subj_'+(\"%02d\" % subject)+'/101_32/DTI/qb_gqi_'+str(seeds)+'M_linear_'+str(qb_dist)+'.pkl' #qb=QuickBundles(T,30.,12)", "= 1 qb_dist = 30 #load T1 volume registered in", "to the window wi.attach(w) #create a manager which can handle", "registered in MNI space fdpyw = 'data/subj_'+(\"%02d\" % subject)+'/101_32/DTI/tracks_gqi_'+str(seeds)+'M_linear.dpy' dpr", "in Python (dipy.org) and Free On Shades (fos.me)\",\\ bgcolor=(0.3,0.3,0.6,1),width=1200,height=800) #attach", "handle multiple windows wm = WindowManager() wm.add(wi) wm.run() print('Everything is", "img.get_data() affine = img.get_affine() #load the tracks registered in MNI", "interactive slicing/masking tool sl = Slicer(affine,data) #add one way communication", "in MNI space fdpyw = 'data/subj_'+(\"%02d\" % subject)+'/101_32/DTI/tracks_gqi_'+str(seeds)+'M_linear.dpy' dpr =", "#dipy modules from dipy.segment.quickbundles import QuickBundles from dipy.io.dpy import Dpy", "#pyglet.options['debug_gl_trace'] = True #pyglet.options['debug_texture'] = True #fos modules from fos.actor.axes", "fpkl = 'data/subj_'+(\"%02d\" % subject)+'/101_32/DTI/qb_gqi_'+str(seeds)+'M_linear_'+str(qb_dist)+'.pkl' #qb=QuickBundles(T,30.,12) qb=load_pickle(fpkl) #create the interaction", "#create a manager which can handle multiple windows wm =", "registered in MNI space img = nib.load('data/subj_'+(\"%02d\" % subject)+'/MPRAGE_32/T1_flirt_out.nii.gz') data", "= 'data/subj_'+(\"%02d\" % subject)+'/101_32/DTI/qb_gqi_'+str(seeds)+'M_linear_'+str(qb_dist)+'.pkl' #qb=QuickBundles(T,30.,12) qb=load_pickle(fpkl) #create the interaction system", "space fdpyw = 'data/subj_'+(\"%02d\" % subject)+'/101_32/DTI/tracks_gqi_'+str(seeds)+'M_linear.dpy' dpr = Dpy(fdpyw, 'r')", "os.path as op import pyglet #pyglet.options['debug_gl'] = True #pyglet.options['debug_x11'] =", "fdpyw = 'data/subj_'+(\"%02d\" % subject)+'/101_32/DTI/tracks_gqi_'+str(seeds)+'M_linear.dpy' dpr = Dpy(fdpyw, 'r') T", "and sl tl.slicer=sl #OpenGL coordinate system axes ax = Axes(100)", "a window wi = Window(caption=\"Interactive Spaghetti using Diffusion Imaging in", "from fos import World, Window, WindowManager from labeler import TrackLabeler", "axes ax = Axes(100) x,y,z=data.shape #add the actors to the", "T1 volume registered in MNI space img = nib.load('data/subj_'+(\"%02d\" %", "= Slicer(affine,data) #add one way communication between tl and sl", "qb=load_pickle(fpkl) #create the interaction system for tracks tl = TrackLabeler(qb,qb.downsampled_tracks(),vol_shape=data.shape,tracks_alpha=1)", "% subject)+'/101_32/DTI/tracks_gqi_'+str(seeds)+'M_linear.dpy' dpr = Dpy(fdpyw, 'r') T = dpr.read_tracks() dpr.close()", "WindowManager from labeler import TrackLabeler from fos.actor.slicer import Slicer #dipy", "dipy.io.pickles import load_pickle,save_pickle from dipy.viz.colormap import orient2rgb import copy if", "= img.get_data() affine = img.get_affine() #load the tracks registered in", "= True 
#pyglet.options['debug_gl_trace'] = True #pyglet.options['debug_texture'] = True #fos modules", "fos import World, Window, WindowManager from labeler import TrackLabeler from", "MNI space fdpyw = 'data/subj_'+(\"%02d\" % subject)+'/101_32/DTI/tracks_gqi_'+str(seeds)+'M_linear.dpy' dpr = Dpy(fdpyw,", "window wi = Window(caption=\"Interactive Spaghetti using Diffusion Imaging in Python", "as np import nibabel as nib import os.path as op", "from labeler import TrackLabeler from fos.actor.slicer import Slicer #dipy modules", "w.add(sl) #w.add(ax) #create a window wi = Window(caption=\"Interactive Spaghetti using", "as op import pyglet #pyglet.options['debug_gl'] = True #pyglet.options['debug_x11'] = True", "communication between tl and sl tl.slicer=sl #OpenGL coordinate system axes", "tl.slicer=sl #OpenGL coordinate system axes ax = Axes(100) x,y,z=data.shape #add", "numpy as np import nibabel as nib import os.path as", "#attach the world to the window wi.attach(w) #create a manager", "orient2rgb import copy if __name__ == '__main__': subject = 5", "windows wm = WindowManager() wm.add(wi) wm.run() print('Everything is running ;-)')", "from dipy.io.dpy import Dpy from dipy.io.pickles import load_pickle,save_pickle from dipy.viz.colormap", "a manager which can handle multiple windows wm = WindowManager()", "On Shades (fos.me)\",\\ bgcolor=(0.3,0.3,0.6,1),width=1200,height=800) #attach the world to the window", "Axes from fos import World, Window, WindowManager from labeler import", "system for tracks tl = TrackLabeler(qb,qb.downsampled_tracks(),vol_shape=data.shape,tracks_alpha=1) #add a interactive slicing/masking", "True #fos modules from fos.actor.axes import Axes from fos import", "world to the window wi.attach(w) #create a manager which can", "nib import os.path as op import pyglet #pyglet.options['debug_gl'] = True", "window wi.attach(w) #create a manager which can handle multiple windows", "subject)+'/101_32/DTI/qb_gqi_'+str(seeds)+'M_linear_'+str(qb_dist)+'.pkl' #qb=QuickBundles(T,30.,12) qb=load_pickle(fpkl) #create the interaction system for tracks tl", "= TrackLabeler(qb,qb.downsampled_tracks(),vol_shape=data.shape,tracks_alpha=1) #add a interactive slicing/masking tool sl = Slicer(affine,data)", "= nib.load('data/subj_'+(\"%02d\" % subject)+'/MPRAGE_32/T1_flirt_out.nii.gz') data = img.get_data() affine = img.get_affine()", "#w.add(ax) #create a window wi = Window(caption=\"Interactive Spaghetti using Diffusion", "import numpy as np import nibabel as nib import os.path", "tracks tl = TrackLabeler(qb,qb.downsampled_tracks(),vol_shape=data.shape,tracks_alpha=1) #add a interactive slicing/masking tool sl", "import TrackLabeler from fos.actor.slicer import Slicer #dipy modules from dipy.segment.quickbundles", "True #pyglet.options['debug_gl_trace'] = True #pyglet.options['debug_texture'] = True #fos modules from", "#create a window wi = Window(caption=\"Interactive Spaghetti using Diffusion Imaging", "fos.actor.axes import Axes from fos import World, Window, WindowManager from", "TrackLabeler from fos.actor.slicer import Slicer #dipy modules from dipy.segment.quickbundles import", "for tracks tl = TrackLabeler(qb,qb.downsampled_tracks(),vol_shape=data.shape,tracks_alpha=1) #add a interactive slicing/masking tool", "= Window(caption=\"Interactive Spaghetti using Diffusion Imaging in Python (dipy.org) and", "the interaction system for tracks tl = TrackLabeler(qb,qb.downsampled_tracks(),vol_shape=data.shape,tracks_alpha=1) #add a", "nib.load('data/subj_'+(\"%02d\" % 
subject)+'/MPRAGE_32/T1_flirt_out.nii.gz') data = img.get_data() affine = img.get_affine() #load", "1 qb_dist = 30 #load T1 volume registered in MNI", "dipy.io.dpy import Dpy from dipy.io.pickles import load_pickle,save_pickle from dipy.viz.colormap import", "dipy.viz.colormap import orient2rgb import copy if __name__ == '__main__': subject", "Shades (fos.me)\",\\ bgcolor=(0.3,0.3,0.6,1),width=1200,height=800) #attach the world to the window wi.attach(w)", "import load_pickle,save_pickle from dipy.viz.colormap import orient2rgb import copy if __name__", "import orient2rgb import copy if __name__ == '__main__': subject =", "copy if __name__ == '__main__': subject = 5 seeds =", "img.get_affine() #load the tracks registered in MNI space fdpyw =", "= dpr.read_tracks() dpr.close() #load initial QuickBundles with threshold 30mm fpkl", "import nibabel as nib import os.path as op import pyglet", "'__main__': subject = 5 seeds = 1 qb_dist = 30", "w.add(tl) w.add(sl) #w.add(ax) #create a window wi = Window(caption=\"Interactive Spaghetti", "modules from fos.actor.axes import Axes from fos import World, Window,", "volume registered in MNI space img = nib.load('data/subj_'+(\"%02d\" % subject)+'/MPRAGE_32/T1_flirt_out.nii.gz')", "import QuickBundles from dipy.io.dpy import Dpy from dipy.io.pickles import load_pickle,save_pickle", "Diffusion Imaging in Python (dipy.org) and Free On Shades (fos.me)\",\\", "30 #load T1 volume registered in MNI space img =", "= 5 seeds = 1 qb_dist = 30 #load T1", "op import pyglet #pyglet.options['debug_gl'] = True #pyglet.options['debug_x11'] = True #pyglet.options['debug_gl_trace']", "dpr.read_tracks() dpr.close() #load initial QuickBundles with threshold 30mm fpkl =", "to the world w=World() w.add(tl) w.add(sl) #w.add(ax) #create a window", "load_pickle,save_pickle from dipy.viz.colormap import orient2rgb import copy if __name__ ==" ]
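The shingles above reassemble into an interactive tractography viewer: it loads a T1 volume and .dpy tracks for one subject, clusters the tracks with QuickBundles at a 30 mm threshold, and wires a TrackLabeler and Slicer into a fos World/Window/WindowManager loop. That script depends on legacy fos/dipy APIs and on the author's local data layout, so as a reproducible illustration here is only the clustering step, using synthetic streamlines and the current dipy import path (the fragments use the older dipy.segment.quickbundles).

# Sketch of the QuickBundles step from the shingled viewer script,
# made self-contained: synthetic streamlines stand in for the .dpy
# tracks, and the modern dipy.segment.clustering API is assumed.
import numpy as np
from dipy.segment.clustering import QuickBundles

rng = np.random.default_rng(0)
# 50 synthetic streamlines, each a (20, 3) array of xyz points.
streamlines = [
    np.cumsum(rng.normal(size=(20, 3)), axis=0).astype(np.float32)
    for _ in range(50)
]

qb = QuickBundles(threshold=30.0)  # 30 mm, matching qb_dist in the script
clusters = qb.cluster(streamlines)
print("bundles:", len(clusters))
for cluster in clusters:
    print("  size:", len(cluster))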
[ "= chineseText.cv2ImgAddText(img, gender, x + h, y, color, 30) cv2.imshow(\"Image\",", "w + 30)] face = cv2.resize(face, (48, 48)) face =", ") gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) faces = face_classifier.detectMultiScale( gray, scaleFactor=1.2,", "w, h) in faces: face = img[(y - 60):(y +", "= cv2.CascadeClassifier( \"d:\\Python36\\Lib\\site-packages\\opencv-master\\data\\haarcascades\\haarcascade_frontalface_default.xml\" ) gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) faces =", "face = face / 255.0 gender_label_arg = np.argmax(gender_classifier.predict(face)) gender =", "255.0 gender_label_arg = np.argmax(gender_classifier.predict(face)) gender = gender_labels[gender_label_arg] cv2.rectangle(img, (x, y),", "face = img[(y - 60):(y + h + 60), (x", "chineseText.cv2ImgAddText(img, gender, x + h, y, color, 30) cv2.imshow(\"Image\", img)", "cv2.COLOR_BGR2GRAY) faces = face_classifier.detectMultiScale( gray, scaleFactor=1.2, minNeighbors=3, minSize=(140, 140)) gender_classifier", "gender, x + h, y, color, 30) cv2.imshow(\"Image\", img) cv2.waitKey(0)", "= {0: '女', 1: '男'} color = (255, 255, 255)", "gray, scaleFactor=1.2, minNeighbors=3, minSize=(140, 140)) gender_classifier = load_model( \"classifier/gender_models/simple_CNN.81-0.96.hdf5\") gender_labels", "(x, y), (x + h, y + w), color, 2)", "face = cv2.resize(face, (48, 48)) face = np.expand_dims(face, 0) face", "(x + h, y + w), color, 2) img =", "(255, 255, 255) for (x, y, w, h) in faces:", "0) face = face / 255.0 gender_label_arg = np.argmax(gender_classifier.predict(face)) gender", "255, 255) for (x, y, w, h) in faces: face", "minNeighbors=3, minSize=(140, 140)) gender_classifier = load_model( \"classifier/gender_models/simple_CNN.81-0.96.hdf5\") gender_labels = {0:", "140)) gender_classifier = load_model( \"classifier/gender_models/simple_CNN.81-0.96.hdf5\") gender_labels = {0: '女', 1:", "faces: face = img[(y - 60):(y + h + 60),", "in faces: face = img[(y - 60):(y + h +", "= (255, 255, 255) for (x, y, w, h) in", "h) in faces: face = img[(y - 60):(y + h", "/ 255.0 gender_label_arg = np.argmax(gender_classifier.predict(face)) gender = gender_labels[gender_label_arg] cv2.rectangle(img, (x,", "+ w), color, 2) img = chineseText.cv2ImgAddText(img, gender, x +", "48)) face = np.expand_dims(face, 0) face = face / 255.0", "#coding=utf-8 #性别识别 import cv2 from keras.models import load_model import numpy", "= face_classifier.detectMultiScale( gray, scaleFactor=1.2, minNeighbors=3, minSize=(140, 140)) gender_classifier = load_model(", "(x, y, w, h) in faces: face = img[(y -", "for (x, y, w, h) in faces: face = img[(y", "2) img = chineseText.cv2ImgAddText(img, gender, x + h, y, color,", "color = (255, 255, 255) for (x, y, w, h)", "= cv2.imread(\"img/gather.png\") face_classifier = cv2.CascadeClassifier( \"d:\\Python36\\Lib\\site-packages\\opencv-master\\data\\haarcascades\\haarcascade_frontalface_default.xml\" ) gray = cv2.cvtColor(img,", "face_classifier = cv2.CascadeClassifier( \"d:\\Python36\\Lib\\site-packages\\opencv-master\\data\\haarcascades\\haarcascade_frontalface_default.xml\" ) gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) faces", "60), (x - 30):(x + w + 30)] face =", "'女', 1: '男'} color = (255, 255, 255) for (x,", "gender_labels[gender_label_arg] cv2.rectangle(img, (x, y), (x + h, y + w),", "\"d:\\Python36\\Lib\\site-packages\\opencv-master\\data\\haarcascades\\haarcascade_frontalface_default.xml\" ) gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) faces = face_classifier.detectMultiScale( gray,", "+ 60), (x - 30):(x + w + 30)] face", 
"faces = face_classifier.detectMultiScale( gray, scaleFactor=1.2, minNeighbors=3, minSize=(140, 140)) gender_classifier =", "cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) faces = face_classifier.detectMultiScale( gray, scaleFactor=1.2, minNeighbors=3, minSize=(140, 140))", "gender = gender_labels[gender_label_arg] cv2.rectangle(img, (x, y), (x + h, y", "255) for (x, y, w, h) in faces: face =", "img[(y - 60):(y + h + 60), (x - 30):(x", "face = np.expand_dims(face, 0) face = face / 255.0 gender_label_arg", "gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) faces = face_classifier.detectMultiScale( gray, scaleFactor=1.2, minNeighbors=3,", "w), color, 2) img = chineseText.cv2ImgAddText(img, gender, x + h,", "np.argmax(gender_classifier.predict(face)) gender = gender_labels[gender_label_arg] cv2.rectangle(img, (x, y), (x + h,", "minSize=(140, 140)) gender_classifier = load_model( \"classifier/gender_models/simple_CNN.81-0.96.hdf5\") gender_labels = {0: '女',", "cv2.imread(\"img/gather.png\") face_classifier = cv2.CascadeClassifier( \"d:\\Python36\\Lib\\site-packages\\opencv-master\\data\\haarcascades\\haarcascade_frontalface_default.xml\" ) gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)", "(48, 48)) face = np.expand_dims(face, 0) face = face /", "= img[(y - 60):(y + h + 60), (x -", "face / 255.0 gender_label_arg = np.argmax(gender_classifier.predict(face)) gender = gender_labels[gender_label_arg] cv2.rectangle(img,", "= cv2.resize(face, (48, 48)) face = np.expand_dims(face, 0) face =", "+ 30)] face = cv2.resize(face, (48, 48)) face = np.expand_dims(face,", "chineseText img = cv2.imread(\"img/gather.png\") face_classifier = cv2.CascadeClassifier( \"d:\\Python36\\Lib\\site-packages\\opencv-master\\data\\haarcascades\\haarcascade_frontalface_default.xml\" ) gray", "(x - 30):(x + w + 30)] face = cv2.resize(face,", "load_model( \"classifier/gender_models/simple_CNN.81-0.96.hdf5\") gender_labels = {0: '女', 1: '男'} color =", "img = chineseText.cv2ImgAddText(img, gender, x + h, y, color, 30)", "h, y + w), color, 2) img = chineseText.cv2ImgAddText(img, gender,", "30)] face = cv2.resize(face, (48, 48)) face = np.expand_dims(face, 0)", "cv2 from keras.models import load_model import numpy as np import", "#性别识别 import cv2 from keras.models import load_model import numpy as", "import numpy as np import chineseText img = cv2.imread(\"img/gather.png\") face_classifier", "gender_classifier = load_model( \"classifier/gender_models/simple_CNN.81-0.96.hdf5\") gender_labels = {0: '女', 1: '男'}", "'男'} color = (255, 255, 255) for (x, y, w,", "= np.expand_dims(face, 0) face = face / 255.0 gender_label_arg =", "numpy as np import chineseText img = cv2.imread(\"img/gather.png\") face_classifier =", "face_classifier.detectMultiScale( gray, scaleFactor=1.2, minNeighbors=3, minSize=(140, 140)) gender_classifier = load_model( \"classifier/gender_models/simple_CNN.81-0.96.hdf5\")", "load_model import numpy as np import chineseText img = cv2.imread(\"img/gather.png\")", "+ h + 60), (x - 30):(x + w +", "h + 60), (x - 30):(x + w + 30)]", "- 30):(x + w + 30)] face = cv2.resize(face, (48,", "cv2.resize(face, (48, 48)) face = np.expand_dims(face, 0) face = face", "img = cv2.imread(\"img/gather.png\") face_classifier = cv2.CascadeClassifier( \"d:\\Python36\\Lib\\site-packages\\opencv-master\\data\\haarcascades\\haarcascade_frontalface_default.xml\" ) gray =", "from keras.models import load_model import numpy as np import chineseText", "= np.argmax(gender_classifier.predict(face)) gender = gender_labels[gender_label_arg] cv2.rectangle(img, (x, y), 
(x +", "1: '男'} color = (255, 255, 255) for (x, y,", "cv2.CascadeClassifier( \"d:\\Python36\\Lib\\site-packages\\opencv-master\\data\\haarcascades\\haarcascade_frontalface_default.xml\" ) gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) faces = face_classifier.detectMultiScale(", "= gender_labels[gender_label_arg] cv2.rectangle(img, (x, y), (x + h, y +", "import cv2 from keras.models import load_model import numpy as np", "import load_model import numpy as np import chineseText img =", "+ h, y + w), color, 2) img = chineseText.cv2ImgAddText(img,", "np import chineseText img = cv2.imread(\"img/gather.png\") face_classifier = cv2.CascadeClassifier( \"d:\\Python36\\Lib\\site-packages\\opencv-master\\data\\haarcascades\\haarcascade_frontalface_default.xml\"", "y), (x + h, y + w), color, 2) img", "np.expand_dims(face, 0) face = face / 255.0 gender_label_arg = np.argmax(gender_classifier.predict(face))", "scaleFactor=1.2, minNeighbors=3, minSize=(140, 140)) gender_classifier = load_model( \"classifier/gender_models/simple_CNN.81-0.96.hdf5\") gender_labels =", "= cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) faces = face_classifier.detectMultiScale( gray, scaleFactor=1.2, minNeighbors=3, minSize=(140,", "60):(y + h + 60), (x - 30):(x + w", "\"classifier/gender_models/simple_CNN.81-0.96.hdf5\") gender_labels = {0: '女', 1: '男'} color = (255,", "+ w + 30)] face = cv2.resize(face, (48, 48)) face", "y + w), color, 2) img = chineseText.cv2ImgAddText(img, gender, x", "cv2.rectangle(img, (x, y), (x + h, y + w), color,", "- 60):(y + h + 60), (x - 30):(x +", "keras.models import load_model import numpy as np import chineseText img", "x + h, y, color, 30) cv2.imshow(\"Image\", img) cv2.waitKey(0) cv2.destroyAllWindows()", "= face / 255.0 gender_label_arg = np.argmax(gender_classifier.predict(face)) gender = gender_labels[gender_label_arg]", "as np import chineseText img = cv2.imread(\"img/gather.png\") face_classifier = cv2.CascadeClassifier(", "y, w, h) in faces: face = img[(y - 60):(y", "{0: '女', 1: '男'} color = (255, 255, 255) for", "gender_labels = {0: '女', 1: '男'} color = (255, 255,", "30):(x + w + 30)] face = cv2.resize(face, (48, 48))", "gender_label_arg = np.argmax(gender_classifier.predict(face)) gender = gender_labels[gender_label_arg] cv2.rectangle(img, (x, y), (x", "import chineseText img = cv2.imread(\"img/gather.png\") face_classifier = cv2.CascadeClassifier( \"d:\\Python36\\Lib\\site-packages\\opencv-master\\data\\haarcascades\\haarcascade_frontalface_default.xml\" )", "= load_model( \"classifier/gender_models/simple_CNN.81-0.96.hdf5\") gender_labels = {0: '女', 1: '男'} color", "color, 2) img = chineseText.cv2ImgAddText(img, gender, x + h, y," ]
[ "blank attendances as necessary. Returns the created profile. \"\"\" student", "for them as follows: - 2 coords per course -", "role=role ) def create_empty_section_for(mentor): \"\"\" Creates a section for MENTOR", "range(JM_COUNT): jm = assign(Profile.JUNIOR_MENTOR, sm, c, juniors) for _ in", "6)): students.append( enroll_user_as_student(next(users), section) ) except StopIteration: pass cls.users =", "is not None: self.assertEqual(resp.status_code, exp_code, msg=fail_msg(endpoint, resp)) return resp def", ") def req_fails_method(self, method, endpoint, data=None): \"\"\" Performs a request", "= give_role(next(users), role, c) profile.leader = leader lst.append(profile) return profile", "users cls.courses = courses cls.coords = coords cls.seniors = seniors", "with a section of 3-6 students \"\"\" users = iter(make_test_users(NUM_USERS))", "section of 3-6 students - 3 JMs per SM, each", "leader, c, lst): # returns the profile created profile =", "supporting the provided method. Returns the response object. \"\"\" return", "juniors, students = [], [], [], [] COORD_COUNT = 2", "resp = method(path.join(BASE_PATH, endpoint.strip(\"/\")), follow=True, data=data) if exp_code is not", "the profile to database. \"\"\" return ProfileFactory.create( user=user, course=course, leader=None,", "def req_fails_perms(self, method, endpoint, data=None): \"\"\" Performs a request to", "scheduler.factories import ( CourseFactory, SpacetimeFactory, UserFactory, ProfileFactory, SectionFactory, AttendanceFactory, OverrideFactory,", "client def request(self, method, endpoint, exp_code=None, data=None): \"\"\" Performs a", "not None: self.assertEqual(resp.status_code, exp_code, msg=fail_msg(endpoint, resp)) return resp def req_fails_perms(self,", "\"\"\" Creates a student profile for USER, and assigns them", "= coords cls.seniors = seniors cls.juniors = juniors cls.students =", "and saves the profile to database. \"\"\" return ProfileFactory.create( user=user,", "the status code of the response is exp_code, if provided.", "k in range(random.randint(3, 6)): students.append(enroll_user_as_student(next(users), section)) # JMs for k", "\"Endpoint: {}\\nResponse Content: {}\".format(ep, resp.content) class APITestCase(TestCase): def get_client_for(self, user):", "seniors) section = create_empty_section_for(sm) for k in range(random.randint(3, 6)): students.append(enroll_user_as_student(next(users),", "database. \"\"\" src = clazz.objects.all() for _ in range(n): yield", "Adds NUM_USERS users to the database and initializes profiles for", "except StopIteration: pass cls.users = users cls.courses = courses cls.coords", "checks that it fails due to the endpoint not supporting", "should be a get/post/etc from an APIClient object. \"\"\" resp", "users = iter(make_test_users(NUM_USERS)) courses = make_test_courses() # for sanity tests,", "= 4 JM_COUNT = 3 def assign(role, leader, c, lst):", "students = [], [], [], [] COORD_COUNT = 2 SM_COUNT", "is logged in as the provided user.\"\"\" client = APIClient()", "enroll_user_as_student(user, section): \"\"\" Creates a student profile for USER, and", "provided method. Returns the response object. \"\"\" return self.request( method,", "3-6 students - 3 JMs per SM, each with a", "random_objs(clazz, n=1): \"\"\" Generates N instances of the provided class,", "Also creates blank attendances as necessary. Returns the created profile.", "\"\"\" Creates a section for MENTOR without populated students. 
\"\"\"", "data=data ) def req_succeeds(self, method, endpoint, data=None): \"\"\" Performs a", "AttendanceFactory, OverrideFactory, create_attendances_for, ) random.seed(0) COURSE_NAMES = (\"CS88\", \"CS61A\", \"CS61B\",", "method, endpoint, exp_code=status.HTTP_403_FORBIDDEN, data=data ) def req_fails_method(self, method, endpoint, data=None):", "\"\"\" return ProfileFactory.create( user=user, course=course, leader=None, section=None, role=role ) def", "section of 3-6 students \"\"\" users = iter(make_test_users(NUM_USERS)) courses =", "a given ROLE for the provided COURSE, and saves the", "the response object. \"\"\" return self.request(method, endpoint, exp_code=status.HTTP_200_OK, data=data) #", "a get/post/etc from an APIClient object. Returns the response object", "SectionFactory, AttendanceFactory, OverrideFactory, create_attendances_for, ) random.seed(0) COURSE_NAMES = (\"CS88\", \"CS61A\",", "courses = make_test_courses() # for sanity tests, everyone only has", "returns the response object. Also checks if the status code", "mentor=mentor) def enroll_user_as_student(user, section): \"\"\" Creates a student profile for", "data=None): \"\"\" Performs a request to the specified endpoint and", "students \"\"\" users = iter(make_test_users(NUM_USERS)) courses = make_test_courses() # for", "3-6 students \"\"\" users = iter(make_test_users(NUM_USERS)) courses = make_test_courses() #", "= len(courses) coords, seniors, juniors, students = [], [], [],", "def random_objs(clazz, n=1): \"\"\" Generates N instances of the provided", "Returns the response object afterwards. \"\"\" return self.request( method, endpoint,", "c) profile.leader = leader lst.append(profile) return profile try: for c", "- 2 coords per course - 4 SMs per coord,", "database. \"\"\" return ProfileFactory.create( user=user, course=course, leader=None, section=None, role=role )", "course=course, leader=None, section=None, role=role ) def create_empty_section_for(mentor): \"\"\" Creates a", "that it succeeds. The method parameter should be a get/post/etc", "of the provided class, retrieved from the database. \"\"\" src", "COURSE_NAMES = (\"CS88\", \"CS61A\", \"CS61B\", \"CS70\", \"CS61C\", \"EE16A\") ROLE_MAP =", "the user lacking proper permissions. The method parameter should be", "JMs per SM, each with a section of 3-6 students", "section = create_empty_section_for(sm) for k in range(random.randint(3, 6)): students.append(enroll_user_as_student(next(users), section))", "that is logged in as the provided user.\"\"\" client =", "\"\"\" return self.request(method, endpoint, exp_code=status.HTTP_200_OK, data=data) # ----- MODEL GENERATION", "import path from rest_framework import status from rest_framework.test import APIClient", "USER in a given ROLE for the provided COURSE, and", "for k in range(JM_COUNT): jm = assign(Profile.JUNIOR_MENTOR, sm, c, juniors)", "object afterwards. 
\"\"\" return self.request( method, endpoint, exp_code=status.HTTP_403_FORBIDDEN, data=data )", "= make_test_courses() # for sanity tests, everyone only has one", "req_succeeds(self, method, endpoint, data=None): \"\"\" Performs a request to the", "method, endpoint, exp_code=status.HTTP_405_METHOD_NOT_ALLOWED, data=data ) def req_succeeds(self, method, endpoint, data=None):", "def enroll_user_as_student(user, section): \"\"\" Creates a student profile for USER,", "def make_test_courses(): \"\"\"Creates course objects and persists them to database.\"\"\"", "in range(n): yield random.choice(src) def make_test_courses(): \"\"\"Creates course objects and", "role, c) profile.leader = leader lst.append(profile) return profile try: for", "checks that it fails due to the user lacking proper", "yield random.choice(src) def make_test_courses(): \"\"\"Creates course objects and persists them", "user.\"\"\" client = APIClient() client.force_authenticate(user) return client def request(self, method,", "= section.leader create_attendances_for(student) return student def gen_test_data(cls, NUM_USERS=300): \"\"\" Adds", "as necessary. Returns the created profile. \"\"\" student = give_role(user,", "to the specified endpoint, and checks that it fails due", "response object afterwards. \"\"\" return self.request( method, endpoint, exp_code=status.HTTP_403_FORBIDDEN, data=data", "random.seed(0) COURSE_NAMES = (\"CS88\", \"CS61A\", \"CS61B\", \"CS70\", \"CS61C\", \"EE16A\") ROLE_MAP", "a request to the specified endpoint, and checks that it", "for _ in range(n): yield random.choice(src) def make_test_courses(): \"\"\"Creates course", "profile to database. \"\"\" return ProfileFactory.create( user=user, course=course, leader=None, section=None,", "in range(JM_COUNT): jm = assign(Profile.JUNIOR_MENTOR, sm, c, juniors) for _", "cls.users = users cls.courses = courses cls.coords = coords cls.seniors", "def req_succeeds(self, method, endpoint, data=None): \"\"\" Performs a request to", "provided COURSE, and saves the profile to database. \"\"\" return", "a section of 3-6 students \"\"\" users = iter(make_test_users(NUM_USERS)) courses", "creates blank attendances as necessary. Returns the created profile. \"\"\"", "coords cls.seniors = seniors cls.juniors = juniors cls.students = students", "parameter should be a get/post/etc from an APIClient object. \"\"\"", "course - 4 SMs per coord, each with a section", "from an APIClient object. Returns the response object afterwards. \"\"\"", "SECTION. Also creates blank attendances as necessary. Returns the created", "a section for MENTOR without populated students. \"\"\" return SectionFactory.create(course=mentor.course,", "= [], [], [], [] COORD_COUNT = 2 SM_COUNT =", "per SM, each with a section of 3-6 students \"\"\"", "2 coords per course - 4 SMs per coord, each", "section.leader create_attendances_for(student) return student def gen_test_data(cls, NUM_USERS=300): \"\"\" Adds NUM_USERS", "and persists them to database.\"\"\" return [CourseFactory.create(name=name) for name in", "# returns the profile created profile = give_role(next(users), role, c)", "msg=fail_msg(endpoint, resp)) return resp def req_fails_perms(self, method, endpoint, data=None): \"\"\"", "profile for USER in a given ROLE for the provided", "= assign(Profile.SENIOR_MENTOR, coord, c, seniors) section = create_empty_section_for(sm) for k", "MENTOR without populated students. 
\"\"\" return SectionFactory.create(course=mentor.course, mentor=mentor) def enroll_user_as_student(user,", "Returns the response object. \"\"\" return self.request( method, endpoint, exp_code=status.HTTP_405_METHOD_NOT_ALLOWED,", "and checks that it fails due to the user lacking", "lacking proper permissions. The method parameter should be a get/post/etc", "specified endpoint, and checks that it succeeds. The method parameter", "user=user, course=course, leader=None, section=None, role=role ) def create_empty_section_for(mentor): \"\"\" Creates", "cls.coords = coords cls.seniors = seniors cls.juniors = juniors cls.students", "= method(path.join(BASE_PATH, endpoint.strip(\"/\")), follow=True, data=data) if exp_code is not None:", "path from rest_framework import status from rest_framework.test import APIClient import", "populated students. \"\"\" return SectionFactory.create(course=mentor.course, mentor=mentor) def enroll_user_as_student(user, section): \"\"\"", "the provided class, retrieved from the database. \"\"\" src =", "per course - 4 SMs per coord, each with a", "\"\"\" return self.request( method, endpoint, exp_code=status.HTTP_405_METHOD_NOT_ALLOWED, data=data ) def req_succeeds(self,", "def make_test_users(n): \"\"\"Creates N test users and persists them to", "students - 3 JMs per SM, each with a section", "[], [] COORD_COUNT = 2 SM_COUNT = 4 JM_COUNT =", "\"CS70\", \"CS61C\", \"EE16A\") ROLE_MAP = Profile.ROLE_MAP BASE_PATH = \"/scheduler\" #", "exp_code=status.HTTP_403_FORBIDDEN, data=data ) def req_fails_method(self, method, endpoint, data=None): \"\"\" Performs", "c, juniors) for _ in range(random.randint(3, 6)): students.append( enroll_user_as_student(next(users), section)", "test users and persists them to database.\"\"\" return UserFactory.create_batch(n) def", "method. Returns the response object. \"\"\" return self.request( method, endpoint,", "return SectionFactory.create(course=mentor.course, mentor=mentor) def enroll_user_as_student(user, section): \"\"\" Creates a student", "object that is logged in as the provided user.\"\"\" client", "has one role for now num_courses = len(courses) coords, seniors,", "should be a get/post/etc from an APIClient object. Returns the", "the response object afterwards. \"\"\" return self.request( method, endpoint, exp_code=status.HTTP_403_FORBIDDEN,", "a student profile for USER, and assigns them to the", "COURSE, and saves the profile to database. \"\"\" return ProfileFactory.create(", "to database. \"\"\" return ProfileFactory.create( user=user, course=course, leader=None, section=None, role=role", "student.section = section student.leader = section.leader create_attendances_for(student) return student def", "to the given SECTION. Also creates blank attendances as necessary.", "NUM_USERS users to the database and initializes profiles for them", "students.append(enroll_user_as_student(next(users), section)) # JMs for k in range(JM_COUNT): jm =", "( CourseFactory, SpacetimeFactory, UserFactory, ProfileFactory, SectionFactory, AttendanceFactory, OverrideFactory, create_attendances_for, )", "make_test_users(n): \"\"\"Creates N test users and persists them to database.\"\"\"", "the specified endpoint and returns the response object. Also checks", "from rest_framework.test import APIClient import random from scheduler.models import Profile", "APIClient object that is logged in as the provided user.\"\"\"", "each with a section of 3-6 students - 3 JMs", "specified endpoint and returns the response object. 
Also checks if", "Profile from scheduler.factories import ( CourseFactory, SpacetimeFactory, UserFactory, ProfileFactory, SectionFactory,", "one role for now num_courses = len(courses) coords, seniors, juniors,", "data=data) if exp_code is not None: self.assertEqual(resp.status_code, exp_code, msg=fail_msg(endpoint, resp))", "= leader lst.append(profile) return profile try: for c in courses:", "parameter should be a get/post/etc from an APIClient object. Returns", "REQUEST UTILITIES ----- def fail_msg(ep, resp): return \"Endpoint: {}\\nResponse Content:", "COURSE_NAMES] def make_test_users(n): \"\"\"Creates N test users and persists them", "response object. Also checks if the status code of the", "client = APIClient() client.force_authenticate(user) return client def request(self, method, endpoint,", "as the provided user.\"\"\" client = APIClient() client.force_authenticate(user) return client", "n=1): \"\"\" Generates N instances of the provided class, retrieved", "assign(Profile.JUNIOR_MENTOR, sm, c, juniors) for _ in range(random.randint(3, 6)): students.append(", "get/post/etc from an APIClient object. \"\"\" resp = method(path.join(BASE_PATH, endpoint.strip(\"/\")),", "endpoint, exp_code=None, data=None): \"\"\" Performs a request to the specified", "the specified endpoint, and checks that it succeeds. The method", "return UserFactory.create_batch(n) def give_role(user, role, course): \"\"\" Creates a profile", "cls.courses = courses cls.coords = coords cls.seniors = seniors cls.juniors", "client.force_authenticate(user) return client def request(self, method, endpoint, exp_code=None, data=None): \"\"\"", "with a section of 3-6 students - 3 JMs per", "return self.request(method, endpoint, exp_code=status.HTTP_200_OK, data=data) # ----- MODEL GENERATION -----", "c, seniors) section = create_empty_section_for(sm) for k in range(random.randint(3, 6)):", "N test users and persists them to database.\"\"\" return UserFactory.create_batch(n)", "the provided COURSE, and saves the profile to database. \"\"\"", "response is exp_code, if provided. The method parameter should be", "and returns the response object. Also checks if the status", "ProfileFactory, SectionFactory, AttendanceFactory, OverrideFactory, create_attendances_for, ) random.seed(0) COURSE_NAMES = (\"CS88\",", "def give_role(user, role, course): \"\"\" Creates a profile for USER", "data=None): \"\"\" Performs a request to the specified endpoint, and", "3 def assign(role, leader, c, lst): # returns the profile", "for j in range(SM_COUNT): sm = assign(Profile.SENIOR_MENTOR, coord, c, seniors)", "SMs for j in range(SM_COUNT): sm = assign(Profile.SENIOR_MENTOR, coord, c,", "for now num_courses = len(courses) coords, seniors, juniors, students =", "self.assertEqual(resp.status_code, exp_code, msg=fail_msg(endpoint, resp)) return resp def req_fails_perms(self, method, endpoint,", "SM, each with a section of 3-6 students \"\"\" users", "in a given ROLE for the provided COURSE, and saves", "and checks that it succeeds. The method parameter should be", "created profile. 
\"\"\" student = give_role(user, Profile.STUDENT, section.course) student.section =", "Profile.ROLE_MAP BASE_PATH = \"/scheduler\" # ----- REQUEST UTILITIES ----- def", "create_empty_section_for(sm) for k in range(random.randint(3, 6)): students.append(enroll_user_as_student(next(users), section)) # JMs", "Performs a request to the specified endpoint and returns the", "clazz.objects.all() for _ in range(n): yield random.choice(src) def make_test_courses(): \"\"\"Creates", "profile try: for c in courses: # coords for i", "3 JMs per SM, each with a section of 3-6", "it fails due to the endpoint not supporting the provided", "num_courses = len(courses) coords, seniors, juniors, students = [], [],", "specified endpoint, and checks that it fails due to the", "try: for c in courses: # coords for i in", "UserFactory.create_batch(n) def give_role(user, role, course): \"\"\" Creates a profile for", "the given SECTION. Also creates blank attendances as necessary. Returns", "instances of the provided class, retrieved from the database. \"\"\"", "an APIClient object that is logged in as the provided", "return [CourseFactory.create(name=name) for name in COURSE_NAMES] def make_test_users(n): \"\"\"Creates N", "fails due to the endpoint not supporting the provided method.", "return self.request( method, endpoint, exp_code=status.HTTP_403_FORBIDDEN, data=data ) def req_fails_method(self, method,", "give_role(next(users), role, c) profile.leader = leader lst.append(profile) return profile try:", "range(COORD_COUNT): coord = assign(Profile.COORDINATOR, None, c, coords) # SMs for", "[], [], [] COORD_COUNT = 2 SM_COUNT = 4 JM_COUNT", "student profile for USER, and assigns them to the given", "class APITestCase(TestCase): def get_client_for(self, user): \"\"\"Returns an APIClient object that", "for k in range(random.randint(3, 6)): students.append(enroll_user_as_student(next(users), section)) # JMs for", "Generates N instances of the provided class, retrieved from the", "object. \"\"\" return self.request( method, endpoint, exp_code=status.HTTP_405_METHOD_NOT_ALLOWED, data=data ) def", "import random from scheduler.models import Profile from scheduler.factories import (", "request to the specified endpoint, and checks that it succeeds.", "to database.\"\"\" return [CourseFactory.create(name=name) for name in COURSE_NAMES] def make_test_users(n):", "if exp_code is not None: self.assertEqual(resp.status_code, exp_code, msg=fail_msg(endpoint, resp)) return", "is exp_code, if provided. The method parameter should be a", "retrieved from the database. \"\"\" src = clazz.objects.all() for _", "in range(random.randint(3, 6)): students.append( enroll_user_as_student(next(users), section) ) except StopIteration: pass", "and assigns them to the given SECTION. 
Also creates blank", "# JMs for k in range(JM_COUNT): jm = assign(Profile.JUNIOR_MENTOR, sm,", "\"\"\"Creates course objects and persists them to database.\"\"\" return [CourseFactory.create(name=name)", "----- REQUEST UTILITIES ----- def fail_msg(ep, resp): return \"Endpoint: {}\\nResponse", "make_test_courses() # for sanity tests, everyone only has one role", "profile = give_role(next(users), role, c) profile.leader = leader lst.append(profile) return", "exp_code is not None: self.assertEqual(resp.status_code, exp_code, msg=fail_msg(endpoint, resp)) return resp", "import ( CourseFactory, SpacetimeFactory, UserFactory, ProfileFactory, SectionFactory, AttendanceFactory, OverrideFactory, create_attendances_for,", "them to database.\"\"\" return [CourseFactory.create(name=name) for name in COURSE_NAMES] def", "APIClient object. \"\"\" resp = method(path.join(BASE_PATH, endpoint.strip(\"/\")), follow=True, data=data) if", "necessary. Returns the created profile. \"\"\" student = give_role(user, Profile.STUDENT,", "coords, seniors, juniors, students = [], [], [], [] COORD_COUNT", "data=data ) def req_fails_method(self, method, endpoint, data=None): \"\"\" Performs a", "GENERATION ----- def random_objs(clazz, n=1): \"\"\" Generates N instances of", "----- def random_objs(clazz, n=1): \"\"\" Generates N instances of the", "endpoint and returns the response object. Also checks if the", "4 SMs per coord, each with a section of 3-6", "status code of the response is exp_code, if provided. The", "saves the profile to database. \"\"\" return ProfileFactory.create( user=user, course=course,", "section)) # JMs for k in range(JM_COUNT): jm = assign(Profile.JUNIOR_MENTOR,", "from an APIClient object. Returns the response object. \"\"\" return", "provided user.\"\"\" client = APIClient() client.force_authenticate(user) return client def request(self,", "= \"/scheduler\" # ----- REQUEST UTILITIES ----- def fail_msg(ep, resp):", "object. \"\"\" return self.request(method, endpoint, exp_code=status.HTTP_200_OK, data=data) # ----- MODEL", "endpoint, and checks that it fails due to the endpoint", "the provided method. Returns the response object. \"\"\" return self.request(", "get/post/etc from an APIClient object. Returns the response object. \"\"\"", "a profile for USER in a given ROLE for the", "def create_empty_section_for(mentor): \"\"\" Creates a section for MENTOR without populated", "c, lst): # returns the profile created profile = give_role(next(users),", "endpoint, and checks that it succeeds. The method parameter should", "APIClient() client.force_authenticate(user) return client def request(self, method, endpoint, exp_code=None, data=None):", "sanity tests, everyone only has one role for now num_courses", ") random.seed(0) COURSE_NAMES = (\"CS88\", \"CS61A\", \"CS61B\", \"CS70\", \"CS61C\", \"EE16A\")", "in range(COORD_COUNT): coord = assign(Profile.COORDINATOR, None, c, coords) # SMs", "django.test import TestCase from os import path from rest_framework import", "\"\"\" return SectionFactory.create(course=mentor.course, mentor=mentor) def enroll_user_as_student(user, section): \"\"\" Creates a", "and persists them to database.\"\"\" return UserFactory.create_batch(n) def give_role(user, role,", "= section student.leader = section.leader create_attendances_for(student) return student def gen_test_data(cls,", "code of the response is exp_code, if provided. The method", "be a get/post/etc from an APIClient object. 
Returns the response", "return student def gen_test_data(cls, NUM_USERS=300): \"\"\" Adds NUM_USERS users to", "provided. The method parameter should be a get/post/etc from an", "CourseFactory, SpacetimeFactory, UserFactory, ProfileFactory, SectionFactory, AttendanceFactory, OverrideFactory, create_attendances_for, ) random.seed(0)", "students. \"\"\" return SectionFactory.create(course=mentor.course, mentor=mentor) def enroll_user_as_student(user, section): \"\"\" Creates", "StopIteration: pass cls.users = users cls.courses = courses cls.coords =", "that it fails due to the endpoint not supporting the", "rest_framework.test import APIClient import random from scheduler.models import Profile from", "SMs per coord, each with a section of 3-6 students", "not supporting the provided method. Returns the response object. \"\"\"", "ProfileFactory.create( user=user, course=course, leader=None, section=None, role=role ) def create_empty_section_for(mentor): \"\"\"", "endpoint, exp_code=status.HTTP_403_FORBIDDEN, data=data ) def req_fails_method(self, method, endpoint, data=None): \"\"\"", "profile. \"\"\" student = give_role(user, Profile.STUDENT, section.course) student.section = section", "def gen_test_data(cls, NUM_USERS=300): \"\"\" Adds NUM_USERS users to the database", "[], [], [], [] COORD_COUNT = 2 SM_COUNT = 4", "for sanity tests, everyone only has one role for now", "for USER in a given ROLE for the provided COURSE,", "\"\"\" Performs a request to the specified endpoint, and checks", "rest_framework import status from rest_framework.test import APIClient import random from", "def fail_msg(ep, resp): return \"Endpoint: {}\\nResponse Content: {}\".format(ep, resp.content) class", "{}\\nResponse Content: {}\".format(ep, resp.content) class APITestCase(TestCase): def get_client_for(self, user): \"\"\"Returns", "range(n): yield random.choice(src) def make_test_courses(): \"\"\"Creates course objects and persists", "resp)) return resp def req_fails_perms(self, method, endpoint, data=None): \"\"\" Performs", "object. Returns the response object. 
\"\"\" return self.request(method, endpoint, exp_code=status.HTTP_200_OK,", "= 2 SM_COUNT = 4 JM_COUNT = 3 def assign(role,", "method, endpoint, exp_code=None, data=None): \"\"\" Performs a request to the", "create_attendances_for, ) random.seed(0) COURSE_NAMES = (\"CS88\", \"CS61A\", \"CS61B\", \"CS70\", \"CS61C\",", "method, endpoint, data=None): \"\"\" Performs a request to the specified", "\"EE16A\") ROLE_MAP = Profile.ROLE_MAP BASE_PATH = \"/scheduler\" # ----- REQUEST", "self.request( method, endpoint, exp_code=status.HTTP_405_METHOD_NOT_ALLOWED, data=data ) def req_succeeds(self, method, endpoint,", "in range(SM_COUNT): sm = assign(Profile.SENIOR_MENTOR, coord, c, seniors) section =", "src = clazz.objects.all() for _ in range(n): yield random.choice(src) def", "now num_courses = len(courses) coords, seniors, juniors, students = [],", "\"\"\" resp = method(path.join(BASE_PATH, endpoint.strip(\"/\")), follow=True, data=data) if exp_code is", "the specified endpoint, and checks that it fails due to", "them to database.\"\"\" return UserFactory.create_batch(n) def give_role(user, role, course): \"\"\"", "endpoint, data=None): \"\"\" Performs a request to the specified endpoint,", "= iter(make_test_users(NUM_USERS)) courses = make_test_courses() # for sanity tests, everyone", "course objects and persists them to database.\"\"\" return [CourseFactory.create(name=name) for", "as follows: - 2 coords per course - 4 SMs", "\"\"\" return self.request( method, endpoint, exp_code=status.HTTP_403_FORBIDDEN, data=data ) def req_fails_method(self,", "len(courses) coords, seniors, juniors, students = [], [], [], []", "Also checks if the status code of the response is", "k in range(JM_COUNT): jm = assign(Profile.JUNIOR_MENTOR, sm, c, juniors) for", "if the status code of the response is exp_code, if", "= 3 def assign(role, leader, c, lst): # returns the", "for c in courses: # coords for i in range(COORD_COUNT):", "persists them to database.\"\"\" return UserFactory.create_batch(n) def give_role(user, role, course):", "logged in as the provided user.\"\"\" client = APIClient() client.force_authenticate(user)", "the response object. \"\"\" return self.request( method, endpoint, exp_code=status.HTTP_405_METHOD_NOT_ALLOWED, data=data", "UTILITIES ----- def fail_msg(ep, resp): return \"Endpoint: {}\\nResponse Content: {}\".format(ep,", "lst.append(profile) return profile try: for c in courses: # coords", "The method parameter should be a get/post/etc from an APIClient", "a section of 3-6 students - 3 JMs per SM,", "a get/post/etc from an APIClient object. \"\"\" resp = method(path.join(BASE_PATH,", "6)): students.append(enroll_user_as_student(next(users), section)) # JMs for k in range(JM_COUNT): jm", "exp_code, msg=fail_msg(endpoint, resp)) return resp def req_fails_perms(self, method, endpoint, data=None):", "\"CS61C\", \"EE16A\") ROLE_MAP = Profile.ROLE_MAP BASE_PATH = \"/scheduler\" # -----", "range(random.randint(3, 6)): students.append(enroll_user_as_student(next(users), section)) # JMs for k in range(JM_COUNT):", "for the provided COURSE, and saves the profile to database.", "response object. \"\"\" return self.request(method, endpoint, exp_code=status.HTTP_200_OK, data=data) # -----", "and checks that it fails due to the endpoint not", "coords) # SMs for j in range(SM_COUNT): sm = assign(Profile.SENIOR_MENTOR,", "them to the given SECTION. 
Also creates blank attendances as", "section.course) student.section = section student.leader = section.leader create_attendances_for(student) return student", "user): \"\"\"Returns an APIClient object that is logged in as", "name in COURSE_NAMES] def make_test_users(n): \"\"\"Creates N test users and", "BASE_PATH = \"/scheduler\" # ----- REQUEST UTILITIES ----- def fail_msg(ep,", "exp_code=status.HTTP_200_OK, data=data) # ----- MODEL GENERATION ----- def random_objs(clazz, n=1):", "a request to the specified endpoint and returns the response", "SM_COUNT = 4 JM_COUNT = 3 def assign(role, leader, c,", "in courses: # coords for i in range(COORD_COUNT): coord =", "it succeeds. The method parameter should be a get/post/etc from", "c in courses: # coords for i in range(COORD_COUNT): coord", "request to the specified endpoint and returns the response object.", "APITestCase(TestCase): def get_client_for(self, user): \"\"\"Returns an APIClient object that is", "give_role(user, role, course): \"\"\" Creates a profile for USER in", "= create_empty_section_for(sm) for k in range(random.randint(3, 6)): students.append(enroll_user_as_student(next(users), section)) #", "resp.content) class APITestCase(TestCase): def get_client_for(self, user): \"\"\"Returns an APIClient object", "due to the user lacking proper permissions. The method parameter", "UserFactory, ProfileFactory, SectionFactory, AttendanceFactory, OverrideFactory, create_attendances_for, ) random.seed(0) COURSE_NAMES =", "tests, everyone only has one role for now num_courses =", "return ProfileFactory.create( user=user, course=course, leader=None, section=None, role=role ) def create_empty_section_for(mentor):", "profile.leader = leader lst.append(profile) return profile try: for c in", "per coord, each with a section of 3-6 students -", "the response is exp_code, if provided. The method parameter should", "in range(random.randint(3, 6)): students.append(enroll_user_as_student(next(users), section)) # JMs for k in", "course): \"\"\" Creates a profile for USER in a given", "from scheduler.models import Profile from scheduler.factories import ( CourseFactory, SpacetimeFactory,", "= Profile.ROLE_MAP BASE_PATH = \"/scheduler\" # ----- REQUEST UTILITIES -----", "to the specified endpoint and returns the response object. 
Also", "data=data) # ----- MODEL GENERATION ----- def random_objs(clazz, n=1): \"\"\"", "= users cls.courses = courses cls.coords = coords cls.seniors =", "the provided user.\"\"\" client = APIClient() client.force_authenticate(user) return client def", "gen_test_data(cls, NUM_USERS=300): \"\"\" Adds NUM_USERS users to the database and", "iter(make_test_users(NUM_USERS)) courses = make_test_courses() # for sanity tests, everyone only", "TestCase from os import path from rest_framework import status from", ") except StopIteration: pass cls.users = users cls.courses = courses", "SpacetimeFactory, UserFactory, ProfileFactory, SectionFactory, AttendanceFactory, OverrideFactory, create_attendances_for, ) random.seed(0) COURSE_NAMES", "\"CS61B\", \"CS70\", \"CS61C\", \"EE16A\") ROLE_MAP = Profile.ROLE_MAP BASE_PATH = \"/scheduler\"", "jm = assign(Profile.JUNIOR_MENTOR, sm, c, juniors) for _ in range(random.randint(3,", "section=None, role=role ) def create_empty_section_for(mentor): \"\"\" Creates a section for", ") def req_succeeds(self, method, endpoint, data=None): \"\"\" Performs a request", "objects and persists them to database.\"\"\" return [CourseFactory.create(name=name) for name", "and initializes profiles for them as follows: - 2 coords", "if provided. The method parameter should be a get/post/etc from", "to the specified endpoint, and checks that it succeeds. The", "def request(self, method, endpoint, exp_code=None, data=None): \"\"\" Performs a request", "COORD_COUNT = 2 SM_COUNT = 4 JM_COUNT = 3 def", "enroll_user_as_student(next(users), section) ) except StopIteration: pass cls.users = users cls.courses", "assign(Profile.COORDINATOR, None, c, coords) # SMs for j in range(SM_COUNT):", "from rest_framework import status from rest_framework.test import APIClient import random", "APIClient object. Returns the response object afterwards. \"\"\" return self.request(", "sm, c, juniors) for _ in range(random.randint(3, 6)): students.append( enroll_user_as_student(next(users),", "coord, each with a section of 3-6 students - 3", "endpoint, exp_code=status.HTTP_200_OK, data=data) # ----- MODEL GENERATION ----- def random_objs(clazz,", "an APIClient object. Returns the response object. \"\"\" return self.request(method,", "None, c, coords) # SMs for j in range(SM_COUNT): sm", "only has one role for now num_courses = len(courses) coords,", "Returns the created profile. \"\"\" student = give_role(user, Profile.STUDENT, section.course)", "= courses cls.coords = coords cls.seniors = seniors cls.juniors =", "an APIClient object. 
\"\"\" resp = method(path.join(BASE_PATH, endpoint.strip(\"/\")), follow=True, data=data)", "create_attendances_for(student) return student def gen_test_data(cls, NUM_USERS=300): \"\"\" Adds NUM_USERS users", "2 SM_COUNT = 4 JM_COUNT = 3 def assign(role, leader,", "courses: # coords for i in range(COORD_COUNT): coord = assign(Profile.COORDINATOR,", "request to the specified endpoint, and checks that it fails", "lst): # returns the profile created profile = give_role(next(users), role,", "fail_msg(ep, resp): return \"Endpoint: {}\\nResponse Content: {}\".format(ep, resp.content) class APITestCase(TestCase):", "coord = assign(Profile.COORDINATOR, None, c, coords) # SMs for j", "self.request( method, endpoint, exp_code=status.HTTP_403_FORBIDDEN, data=data ) def req_fails_method(self, method, endpoint,", "coords for i in range(COORD_COUNT): coord = assign(Profile.COORDINATOR, None, c,", "# SMs for j in range(SM_COUNT): sm = assign(Profile.SENIOR_MENTOR, coord,", "given ROLE for the provided COURSE, and saves the profile", "= clazz.objects.all() for _ in range(n): yield random.choice(src) def make_test_courses():", "the profile created profile = give_role(next(users), role, c) profile.leader =", "to the user lacking proper permissions. The method parameter should", "object. Also checks if the status code of the response", "4 JM_COUNT = 3 def assign(role, leader, c, lst): #", "req_fails_perms(self, method, endpoint, data=None): \"\"\" Performs a request to the", "assign(role, leader, c, lst): # returns the profile created profile", "return self.request( method, endpoint, exp_code=status.HTTP_405_METHOD_NOT_ALLOWED, data=data ) def req_succeeds(self, method,", "from the database. \"\"\" src = clazz.objects.all() for _ in", "from os import path from rest_framework import status from rest_framework.test", "USER, and assigns them to the given SECTION. Also creates", "MODEL GENERATION ----- def random_objs(clazz, n=1): \"\"\" Generates N instances", "get_client_for(self, user): \"\"\"Returns an APIClient object that is logged in", "for USER, and assigns them to the given SECTION. Also", "afterwards. \"\"\" return self.request( method, endpoint, exp_code=status.HTTP_403_FORBIDDEN, data=data ) def", "import status from rest_framework.test import APIClient import random from scheduler.models", "courses cls.coords = coords cls.seniors = seniors cls.juniors = juniors", "from scheduler.factories import ( CourseFactory, SpacetimeFactory, UserFactory, ProfileFactory, SectionFactory, AttendanceFactory,", "- 3 JMs per SM, each with a section of", "= (\"CS88\", \"CS61A\", \"CS61B\", \"CS70\", \"CS61C\", \"EE16A\") ROLE_MAP = Profile.ROLE_MAP", "Content: {}\".format(ep, resp.content) class APITestCase(TestCase): def get_client_for(self, user): \"\"\"Returns an", "response object. 
\"\"\" return self.request( method, endpoint, exp_code=status.HTTP_405_METHOD_NOT_ALLOWED, data=data )", "from django.test import TestCase from os import path from rest_framework", "create_empty_section_for(mentor): \"\"\" Creates a section for MENTOR without populated students.", "= assign(Profile.JUNIOR_MENTOR, sm, c, juniors) for _ in range(random.randint(3, 6)):", "to the database and initializes profiles for them as follows:", "student = give_role(user, Profile.STUDENT, section.course) student.section = section student.leader =", "in COURSE_NAMES] def make_test_users(n): \"\"\"Creates N test users and persists", "seniors, juniors, students = [], [], [], [] COORD_COUNT =", "OverrideFactory, create_attendances_for, ) random.seed(0) COURSE_NAMES = (\"CS88\", \"CS61A\", \"CS61B\", \"CS70\",", "\"\"\" src = clazz.objects.all() for _ in range(n): yield random.choice(src)", "import TestCase from os import path from rest_framework import status", "scheduler.models import Profile from scheduler.factories import ( CourseFactory, SpacetimeFactory, UserFactory,", "initializes profiles for them as follows: - 2 coords per", "return resp def req_fails_perms(self, method, endpoint, data=None): \"\"\" Performs a", "it fails due to the user lacking proper permissions. The", "\"\"\" Performs a request to the specified endpoint and returns", "\"\"\" Creates a profile for USER in a given ROLE", "import APIClient import random from scheduler.models import Profile from scheduler.factories", "(\"CS88\", \"CS61A\", \"CS61B\", \"CS70\", \"CS61C\", \"EE16A\") ROLE_MAP = Profile.ROLE_MAP BASE_PATH", ") def create_empty_section_for(mentor): \"\"\" Creates a section for MENTOR without", "proper permissions. The method parameter should be a get/post/etc from", "= APIClient() client.force_authenticate(user) return client def request(self, method, endpoint, exp_code=None,", "object. \"\"\" resp = method(path.join(BASE_PATH, endpoint.strip(\"/\")), follow=True, data=data) if exp_code", "class, retrieved from the database. \"\"\" src = clazz.objects.all() for", "endpoint, and checks that it fails due to the user", "everyone only has one role for now num_courses = len(courses)", "the created profile. \"\"\" student = give_role(user, Profile.STUDENT, section.course) student.section", "for i in range(COORD_COUNT): coord = assign(Profile.COORDINATOR, None, c, coords)", "method parameter should be a get/post/etc from an APIClient object.", "def get_client_for(self, user): \"\"\"Returns an APIClient object that is logged", "profile for USER, and assigns them to the given SECTION.", "follow=True, data=data) if exp_code is not None: self.assertEqual(resp.status_code, exp_code, msg=fail_msg(endpoint,", "to the endpoint not supporting the provided method. Returns the", "method(path.join(BASE_PATH, endpoint.strip(\"/\")), follow=True, data=data) if exp_code is not None: self.assertEqual(resp.status_code,", "[] COORD_COUNT = 2 SM_COUNT = 4 JM_COUNT = 3", "Creates a section for MENTOR without populated students. \"\"\" return", "returns the profile created profile = give_role(next(users), role, c) profile.leader", "return \"Endpoint: {}\\nResponse Content: {}\".format(ep, resp.content) class APITestCase(TestCase): def get_client_for(self,", "{}\".format(ep, resp.content) class APITestCase(TestCase): def get_client_for(self, user): \"\"\"Returns an APIClient", "the database. 
\"\"\" src = clazz.objects.all() for _ in range(n):", "random from scheduler.models import Profile from scheduler.factories import ( CourseFactory,", "fails due to the user lacking proper permissions. The method", "from an APIClient object. \"\"\" resp = method(path.join(BASE_PATH, endpoint.strip(\"/\")), follow=True,", "assign(Profile.SENIOR_MENTOR, coord, c, seniors) section = create_empty_section_for(sm) for k in", "[CourseFactory.create(name=name) for name in COURSE_NAMES] def make_test_users(n): \"\"\"Creates N test", "None: self.assertEqual(resp.status_code, exp_code, msg=fail_msg(endpoint, resp)) return resp def req_fails_perms(self, method,", "\"/scheduler\" # ----- REQUEST UTILITIES ----- def fail_msg(ep, resp): return", "a get/post/etc from an APIClient object. Returns the response object.", "\"CS61A\", \"CS61B\", \"CS70\", \"CS61C\", \"EE16A\") ROLE_MAP = Profile.ROLE_MAP BASE_PATH =", "database.\"\"\" return [CourseFactory.create(name=name) for name in COURSE_NAMES] def make_test_users(n): \"\"\"Creates", "of 3-6 students \"\"\" users = iter(make_test_users(NUM_USERS)) courses = make_test_courses()", "----- MODEL GENERATION ----- def random_objs(clazz, n=1): \"\"\" Generates N", "profile created profile = give_role(next(users), role, c) profile.leader = leader", "profiles for them as follows: - 2 coords per course", "student.leader = section.leader create_attendances_for(student) return student def gen_test_data(cls, NUM_USERS=300): \"\"\"", "for name in COURSE_NAMES] def make_test_users(n): \"\"\"Creates N test users", "persists them to database.\"\"\" return [CourseFactory.create(name=name) for name in COURSE_NAMES]", "checks that it succeeds. The method parameter should be a", "exp_code=status.HTTP_405_METHOD_NOT_ALLOWED, data=data ) def req_succeeds(self, method, endpoint, data=None): \"\"\" Performs", "pass cls.users = users cls.courses = courses cls.coords = coords", "section for MENTOR without populated students. \"\"\" return SectionFactory.create(course=mentor.course, mentor=mentor)", "c, coords) # SMs for j in range(SM_COUNT): sm =", "JM_COUNT = 3 def assign(role, leader, c, lst): # returns", "coords per course - 4 SMs per coord, each with", "user lacking proper permissions. The method parameter should be a", "them as follows: - 2 coords per course - 4", "checks if the status code of the response is exp_code,", "an APIClient object. Returns the response object afterwards. \"\"\" return", "students.append( enroll_user_as_student(next(users), section) ) except StopIteration: pass cls.users = users", "due to the endpoint not supporting the provided method. Returns", "endpoint not supporting the provided method. Returns the response object.", "the database and initializes profiles for them as follows: -", "the response object. Also checks if the status code of", "database.\"\"\" return UserFactory.create_batch(n) def give_role(user, role, course): \"\"\" Creates a", "without populated students. \"\"\" return SectionFactory.create(course=mentor.course, mentor=mentor) def enroll_user_as_student(user, section):", "ROLE_MAP = Profile.ROLE_MAP BASE_PATH = \"/scheduler\" # ----- REQUEST UTILITIES", "Performs a request to the specified endpoint, and checks that", "# ----- MODEL GENERATION ----- def random_objs(clazz, n=1): \"\"\" Generates", "random.choice(src) def make_test_courses(): \"\"\"Creates course objects and persists them to", "exp_code, if provided. 
from os import path
from rest_framework import status
from rest_framework.test import APIClient
import random
from scheduler.models import Profile
from scheduler.factories import (
    CourseFactory,
    SpacetimeFactory,
    UserFactory,
    ProfileFactory,
    # SectionFactory and create_attendances_for are used below and are
    # assumed to come from the same factories module
    SectionFactory,
    create_attendances_for,
)
from django.test import TestCase  # base class for APITestCase below

# ----- REQUEST UTILITIES -----


def fail_msg(ep, resp):
    return "Endpoint: {}\nResponse Content: {}".format(ep, resp.content)


class APITestCase(TestCase):
    def get_client_for(self, user):  # helper name assumed
        """Returns an APIClient object that is logged in as the provided user."""
        client = APIClient()
        client.force_authenticate(user)
        return client

    def request(self, method, endpoint, exp_code=None, data=None):
        """
        Performs a request to the specified endpoint and checks that the status
        code of the response is exp_code, if provided. The method parameter
        should be a get/post/etc from an APIClient object.
        """
        # URL construction partially inferred from the imports
        resp = method(path.join("/", endpoint.strip("/")), follow=True, data=data)
        if exp_code is not None:
            self.assertEqual(resp.status_code, exp_code, msg=fail_msg(endpoint, resp))
        return resp

    def req_fails_perms(self, method, endpoint, data=None):
        """
        Performs a request to the specified endpoint, and checks that it fails
        due to the user lacking proper permissions. The method parameter should
        be a get/post/etc from an APIClient object. Returns the response object
        afterwards.
        """
        return self.request(
            method, endpoint, exp_code=status.HTTP_403_FORBIDDEN, data=data  # 403 inferred
        )

    def req_fails_method(self, method, endpoint, data=None):
        """
        Performs a request to the specified endpoint, and checks that it fails
        due to the endpoint not supporting the provided method. Returns the
        response object afterwards.
        """
        return self.request(
            method, endpoint, exp_code=status.HTTP_405_METHOD_NOT_ALLOWED, data=data
        )

    def req_succeeds(self, method, endpoint, data=None):
        """
        Performs a request to the specified endpoint and checks that it
        succeeds. The method parameter should be a get/post/etc from an
        APIClient object. Returns the response object.
        """
        return self.request(method, endpoint, exp_code=status.HTTP_200_OK, data=data)


# ----- MODEL GENERATION -----


def random_objs(clazz, n):  # function name assumed
    """
    Generates N instances of the provided class, retrieved from the database.
    """
    src = clazz.objects.all()
    for _ in range(n):
        yield random.choice(src)


def make_test_courses():
    """Creates course objects and persists them to database."""
    return CourseFactory.create_batch(4)  # batch size truncated in the source; 4 assumed


def make_test_users(n):
    """Creates N test users and persists them to database."""
    return UserFactory.create_batch(n)


def give_role(user, role, course):
    """
    Creates a profile for USER in a given ROLE for the provided COURSE, and
    saves the profile to database.
    """
    return ProfileFactory.create(
        user=user, course=course, leader=None, section=None, role=role
    )


def create_empty_section_for(mentor):
    """
    Creates a section for MENTOR without populated students.
    """
    return SectionFactory.create(course=mentor.course, mentor=mentor)


def enroll_user_as_student(user, section):
    """
    Creates a student profile for USER, and assigns them to the given SECTION.
    Also creates blank attendances as necessary. Returns the created profile.
    """
    student = give_role(user, Profile.STUDENT, section.course)
    student.section = section
    student.leader = section.leader
    create_attendances_for(student)
    return student


def gen_test_data(cls, NUM_USERS=300):
    """
    Adds NUM_USERS users to the database and initializes profiles for them as
    follows:
    - 2 coords per course
    - 4 SMs per coord, each with a section of 3-6 students
    - 3 JMs per SM, each with a section of 3-6 students
    """
    users = iter(make_test_users(NUM_USERS))
    courses = make_test_courses()
    # for sanity tests, everyone only has one role for now
    num_courses = len(courses)
    coords, seniors, juniors, students = [], [], [], []
    COORD_COUNT = 2  # per course
    SM_COUNT = 4     # per coord
    JM_COUNT = 3     # per SM

    def assign(role, leader, c, lst):
        # returns the profile created
        profile = give_role(next(users), role, c)
        profile.leader = leader
        lst.append(profile)
        return profile

    try:
        for c in courses:
            # coords
            for i in range(COORD_COUNT):
                coord = assign(Profile.COORDINATOR, None, c, coords)
                # SMs
                for j in range(SM_COUNT):
                    sm = assign(Profile.SENIOR_MENTOR, coord, c, seniors)
                    section = create_empty_section_for(sm)
                    for k in range(random.randint(3, 6)):
                        students.append(
                            enroll_user_as_student(next(users), section)
                        )
                    # JMs
                    for k in range(JM_COUNT):
                        jm = assign(Profile.JUNIOR_MENTOR, sm, c, juniors)
                        # each JM gets their own section, per the docstring
                        section = create_empty_section_for(jm)
                        for _ in range(random.randint(3, 6)):
                            students.append(
                                enroll_user_as_student(next(users), section)
                            )
    except StopIteration:
        pass
    cls.users = users
    cls.courses = courses
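To show how these helpers compose, here is a minimal sketch of a permissions test, written as if it lived in the same module as the utilities above. The endpoint name "profiles" is hypothetical, and get_client_for is the assumed helper name noted in the class:

class ExampleEndpointTest(APITestCase):
    # Sketch only: "profiles" is a hypothetical endpoint, and get_client_for
    # is an assumed name; swap in the real endpoint/helper as needed.
    def test_mentor_sees_profiles_but_student_does_not(self):
        course = make_test_courses()[0]
        mentor = give_role(UserFactory.create(), Profile.SENIOR_MENTOR, course)
        student = give_role(UserFactory.create(), Profile.STUDENT, course)

        mentor_client = self.get_client_for(mentor.user)
        student_client = self.get_client_for(student.user)

        # The method argument is a bound get/post/etc from an APIClient,
        # exactly as the request() docstring describes.
        self.req_succeeds(mentor_client.get, "profiles")
        self.req_fails_perms(student_client.get, "profiles")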
import math
from fontTools.pens.recordingPen import RecordingPen, replayRecording
from fontTools.misc.bezierTools import calcCubicArcLength, splitCubicAtT
from coldtype.geometry import Rect, Point


def raise_quadratic(start, a, b):
    # Elevate a quadratic segment (off-curve a, on-curve b) to a cubic.
    c0 = start
    c1 = (c0[0] + (2/3)*(a[0] - c0[0]), c0[1] + (2/3)*(a[1] - c0[1]))
    c2 = (b[0] + (2/3)*(a[0] - b[0]), b[1] + (2/3)*(a[1] - b[1]))
    c3 = (b[0], b[1])
    return [c1, c2, c3]


__length_cache = {}
__split_cache = {}


def splitCubicAtT_cached(a, b, c, d, t):
    global __split_cache
    abcdt = (a, b, c, d, t)
    sc = __split_cache.get(abcdt)
    if sc:
        return sc
    else:
        s = splitCubicAtT(a, b, c, d, t)
        __split_cache[abcdt] = s
        return s


def calcCubicArcLength_cached(a, b, c, d):
    #return calcCubicArcLength(a, b, c, d)
    global __length_cache
    abcd = (a, b, c, d)
    lc = __length_cache.get(abcd)
    if lc:
        return lc
    else:
        l = calcCubicArcLength(a, b, c, d)
        __length_cache[abcd] = l
        return l


class CurveCutter():
    def __init__(self, g, inc=0.0015):
        if isinstance(g, RecordingPen):
            self.pen = g
        else:
            self.pen = RecordingPen()
            g.draw(self.pen)
        self.inc = inc
        self.length = self.calcCurveLength()

    def calcCurveLength(self):
        length = 0
        for i, (t, pts) in enumerate(self.pen.value):
            if t == "curveTo":
                p1, p2, p3 = pts
                p0 = self.pen.value[i-1][-1][-1]
                length += calcCubicArcLength_cached(p0, p1, p2, p3)
            elif t == "lineTo":
                pass  # todo
        return length

    def subsegment(self, start=None, end=None):
        # NOTE: start is currently unused; the cut always begins at the start
        # of the path and runs until the accumulated arc length reaches end.
        global __cut_cache
        inc = self.inc
        length = self.length
        ended = False
        _length = 0
        out = []
        for i, (t, pts) in enumerate(self.pen.value):
            if t == "curveTo":
                p1, p2, p3 = pts
                p0 = self.pen.value[i-1][-1][-1]
                length_arc = calcCubicArcLength_cached(p0, p1, p2, p3)
                if _length + length_arc < end:
                    _length += length_arc
                else:
                    # Walk tv up the curve until the first half of the split
                    # reaches the requested arc length.
                    tv = inc  # renamed from t to avoid shadowing the segment type
                    tries = 0
                    while not ended:
                        a, b = splitCubicAtT_cached(p0, p1, p2, p3, tv)
                        length_a = calcCubicArcLength_cached(*a)
                        if _length + length_a > end:
                            ended = True
                            out.append(("curveTo", a[1:]))
                        else:
                            tv += inc
                            tries += 1
            if t == "lineTo":
                pass  # TODO
            if not ended:
                out.append((t, pts))
        if out[-1][0] != "endPath":
            out.append(("endPath", []))
        return out

    def subsegmentPoint(self, start=0, end=1):
        inc = self.inc
        subsegment = self.subsegment(start=start, end=end)
        try:
            t, (a, b, c) = subsegment[-2]
            tangent = math.degrees(math.atan2(c[1] - b[1], c[0] - b[0]) + math.pi*.5)
            return c, tangent
        except ValueError:
            return None, None
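As a quick illustration of the intended use, the sketch below records a single cubic with a RecordingPen and walks a point (with its tangent angle) along the curve; the control points are arbitrary:

from fontTools.pens.recordingPen import RecordingPen

# Record one cubic segment; any moveTo/curveTo sequence works the same way.
pen = RecordingPen()
pen.moveTo((0, 0))
pen.curveTo((100, 0), (200, 100), (200, 200))

cutter = CurveCutter(pen)
for i in range(1, 6):
    # Cut at fractions of the total arc length and read back the cut
    # point and its tangent angle in degrees.
    pos, angle = cutter.subsegmentPoint(end=cutter.length * i / 6)
    print(pos, angle)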
[ "scores eps = eps_start # initialize epsilon for i_episode in", "torch device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\") \"\"\" Unity", "actions. Four discrete actions are available, corresponding to: 0 -", "this information, the agent has to learn how to best", "with ray-based perception of objects around the agent's forward direction.", "Linux (x86): \"path/to/Banana_Linux/Banana.x86\" Linux (x86_64): \"path/to/Banana_Linux/Banana.x86_64\" Linux (x86, headless): \"path/to/Banana_Linux_NoVis/Banana.x86\"", "= env_info.vector_observations[0] # set initial score score = 0 while", "and contains the agent's velocity, along with ray-based perception of", "numpy as np from collections import deque from dqn_agent import", "Reinforcement Learning This script train an agent to navigate (and", "done: break scores_window.append(score) # save most recent score scores.append(score) #", "= env.brain_names[0] brain = env.brains[brain_name] env_info = env.reset(train_mode=False)[brain_name] action_size =", "env.brains[brain_name] env_info = env.reset(train_mode=False)[brain_name] action_size = brain.vector_action_space_size state_size = len(env_info.vector_observations[0])", "the agent's velocity, along with ray-based perception of objects around", "of your agent is to collect as many yellow bananas", "- move forward. 1 - move backward. 2 - turn", "if i_episode % 100 == 0: print('\\rEpisode {}\\tAverage Score: {:.2f}'.format(i_episode,", "recent score eps = max(eps_end, eps_decay*eps) # decrease epsilon print('\\rEpisode", "of training episodes eps_start (float): starting value of epsilon, for", "37 dimensions and contains the agent's velocity, along with ray-based", "from unityagents import UnityEnvironment import numpy as np from collections", "scores from each episode scores_window = deque(maxlen=100) # last 100", "to: 0 - move forward. 1 - move backward. 2", "0 - move forward. 1 - move backward. 2 -", "agent is to collect as many yellow bananas as possible", "eps_start=1.0, eps_end=0.05, eps_decay=0.99): \"\"\"Deep Q-Learning. Params ====== n_episodes (int): maximum", "available, corresponding to: 0 - move forward. 1 - move", "====== n_episodes (int): maximum number of training episodes eps_start (float):", "a yellow banana, and a reward of -1 is provided", "import torch device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\") \"\"\"", "= env_info.vector_observations[0], env_info.rewards[0], env_info.local_done[0] agent.step(state, action, reward, next_state, done) state", "{:.2f}'.format(i_episode, np.mean(scores_window)), end=\"\") if i_episode % 100 == 0: print('\\rEpisode", "agent.act(state, eps) env_info = env.step(action)[brain_name] next_state, reward, done = env_info.vector_observations[0],", "reward of +1 is provided for collecting a yellow banana,", "to learn how to best select actions. Four discrete actions", "move backward. 2 - turn left. 3 - turn right.", "= eps_start # initialize epsilon for i_episode in range(1, n_episodes+1):", "avoiding blue bananas. 
The state space has 37 dimensions and", "multiplicative factor (per episode) for decreasing epsilon \"\"\" scores =", "reward if done: break scores_window.append(score) # save most recent score", "dimensions and contains the agent's velocity, along with ray-based perception", "unityagents import UnityEnvironment import numpy as np from collections import", "is provided for collecting a yellow banana, and a reward", "reward of -1 is provided for collecting a blue banana.", "velocity, along with ray-based perception of objects around the agent's", "get initial state state = env_info.vector_observations[0] # set initial score", "eps_start (float): starting value of epsilon, for epsilon-greedy action selection", "as many yellow bananas as possible while avoiding blue bananas.", "consecutive episodes. \"\"\" from unityagents import UnityEnvironment import numpy as", "i_episode % 100 == 0: print('\\rEpisode {}\\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window)))", "done = env_info.vector_observations[0], env_info.rewards[0], env_info.local_done[0] agent.step(state, action, reward, next_state, done)", "import numpy as np from collections import deque from dqn_agent", "of -1 is provided for collecting a blue banana. Thus,", "agent to navigate (and collect bananas!) in a large, square", "default brain brain_name = env.brain_names[0] brain = env.brains[brain_name] env_info =", "print('\\rEpisode {}\\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window))) if np.mean(scores_window)>=14: print('\\nEnvironment solved in", "to collect as many yellow bananas as possible while avoiding", "episodes. \"\"\" from unityagents import UnityEnvironment import numpy as np", "Score: {:.2f}'.format(i_episode, np.mean(scores_window)), end=\"\") if i_episode % 100 == 0:", "eps = max(eps_end, eps_decay*eps) # decrease epsilon print('\\rEpisode {}\\tAverage Score:", "if torch.cuda.is_available() else \"cpu\") \"\"\" Unity environment configuration Mac: \"path/to/Banana.app\"", "state_size = len(env_info.vector_observations[0]) # initialize agent agent = Agent(state_size=state_size, action_size=action_size,", "epsilon-greedy action selection eps_end (float): minimum value of epsilon eps_decay", "save most recent score eps = max(eps_end, eps_decay*eps) # decrease", "eps_decay=0.99): \"\"\"Deep Q-Learning. Params ====== n_episodes (int): maximum number of", "for i_episode in range(1, n_episodes+1): # reset environment env_info =", "action = agent.act(state, eps) env_info = env.step(action)[brain_name] next_state, reward, done", "train an agent to navigate (and collect bananas!) in a", "if np.mean(scores_window)>=14: print('\\nEnvironment solved in {:d} episodes!\\tAverage Score: {:.2f}'.format(i_episode-100, np.mean(scores_window)))", "Mac: \"path/to/Banana.app\" Windows (x86): \"path/to/Banana_Windows_x86/Banana.exe\" Windows (x86_64): \"path/to/Banana_Windows_x86_64/Banana.exe\" Linux (x86):", "Q-Learning. Params ====== n_episodes (int): maximum number of training episodes", "env_info.vector_observations[0], env_info.rewards[0], env_info.local_done[0] agent.step(state, action, reward, next_state, done) state =", "(x86_64): \"path/to/Banana_Windows_x86_64/Banana.exe\" Linux (x86): \"path/to/Banana_Linux/Banana.x86\" Linux (x86_64): \"path/to/Banana_Linux/Banana.x86_64\" Linux (x86,", "yellow bananas as possible while avoiding blue bananas. The state", "provided for collecting a blue banana. 
Thus, the goal of", "each episode scores_window = deque(maxlen=100) # last 100 scores eps", "env_info.local_done[0] agent.step(state, action, reward, next_state, done) state = next_state score", "contains the agent's velocity, along with ray-based perception of objects", "agent must get an average score of +13 over 100", "Linux (x86_64, headless): \"path/to/Banana_Linux_NoVis/Banana.x86_64\" \"\"\" # start Unity environment env", "env.brain_names[0] brain = env.brains[brain_name] env_info = env.reset(train_mode=False)[brain_name] action_size = brain.vector_action_space_size", "= env.brains[brain_name] env_info = env.reset(train_mode=False)[brain_name] action_size = brain.vector_action_space_size state_size =", "= agent.act(state, eps) env_info = env.step(action)[brain_name] next_state, reward, done =", "score += reward if done: break scores_window.append(score) # save most", "np.mean(scores_window)), end=\"\") if i_episode % 100 == 0: print('\\rEpisode {}\\tAverage", "# save most recent score eps = max(eps_end, eps_decay*eps) #", "left. 3 - turn right. The task is episodic, and", "= max(eps_end, eps_decay*eps) # decrease epsilon print('\\rEpisode {}\\tAverage Score: {:.2f}'.format(i_episode,", "episodes!\\tAverage Score: {:.2f}'.format(i_episode-100, np.mean(scores_window))) torch.save(agent.qnetwork_local.state_dict(), 'checkpoint.pth') break return scores train()", "must get an average score of +13 over 100 consecutive", "goal of your agent is to collect as many yellow", "The state space has 37 dimensions and contains the agent's", "env_info.vector_observations[0] # set initial score score = 0 while True:", "\"\"\" Project for Udacity Danaodgree in Deep Reinforcement Learning This", "maximum number of training episodes eps_start (float): starting value of", "ray-based perception of objects around the agent's forward direction. Given", "from dqn_agent import Agent import torch device = torch.device(\"cuda:0\" if", "agent's velocity, along with ray-based perception of objects around the", "learn how to best select actions. Four discrete actions are", "for epsilon-greedy action selection eps_end (float): minimum value of epsilon", "many yellow bananas as possible while avoiding blue bananas. The", "solved in {:d} episodes!\\tAverage Score: {:.2f}'.format(i_episode-100, np.mean(scores_window))) torch.save(agent.qnetwork_local.state_dict(), 'checkpoint.pth') break", "eps_decay (float): multiplicative factor (per episode) for decreasing epsilon \"\"\"", "forward. 1 - move backward. 2 - turn left. 3", "env.reset(train_mode=True)[brain_name] # get initial state state = env_info.vector_observations[0] # set", "over 100 consecutive episodes. \"\"\" from unityagents import UnityEnvironment import", "2 - turn left. 3 - turn right. The task", "in range(1, n_episodes+1): # reset environment env_info = env.reset(train_mode=True)[brain_name] #", "backward. 2 - turn left. 3 - turn right. The", "objects around the agent's forward direction. Given this information, the", "most recent score scores.append(score) # save most recent score eps", "(per episode) for decreasing epsilon \"\"\" scores = [] #", "= UnityEnvironment(file_name=\"Banana.app\") # get the default brain brain_name = env.brain_names[0]", "are available, corresponding to: 0 - move forward. 
1 -", "{}\\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window)), end=\"\") if i_episode % 100 ==", "to solve the environment, your agent must get an average", "reward, done = env_info.vector_observations[0], env_info.rewards[0], env_info.local_done[0] agent.step(state, action, reward, next_state,", "print('\\rEpisode {}\\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window)), end=\"\") if i_episode % 100", "the agent's forward direction. Given this information, the agent has", "len(env_info.vector_observations[0]) # initialize agent agent = Agent(state_size=state_size, action_size=action_size, seed=0, device=device)", "[] # list containing scores from each episode scores_window =", "decreasing epsilon \"\"\" scores = [] # list containing scores", "has to learn how to best select actions. Four discrete", "\"path/to/Banana_Linux_NoVis/Banana.x86_64\" \"\"\" # start Unity environment env = UnityEnvironment(file_name=\"Banana.app\") #", "next_state, reward, done = env_info.vector_observations[0], env_info.rewards[0], env_info.local_done[0] agent.step(state, action, reward,", "end=\"\") if i_episode % 100 == 0: print('\\rEpisode {}\\tAverage Score:", "import UnityEnvironment import numpy as np from collections import deque", "reward, next_state, done) state = next_state score += reward if", "state = env_info.vector_observations[0] # set initial score score = 0", "best select actions. Four discrete actions are available, corresponding to:", "deque(maxlen=100) # last 100 scores eps = eps_start # initialize", "(int): maximum number of training episodes eps_start (float): starting value", "recent score scores.append(score) # save most recent score eps =", "square world. A reward of +1 is provided for collecting", "space has 37 dimensions and contains the agent's velocity, along", "from each episode scores_window = deque(maxlen=100) # last 100 scores", "= len(env_info.vector_observations[0]) # initialize agent agent = Agent(state_size=state_size, action_size=action_size, seed=0,", "episodic, and in order to solve the environment, your agent", "# start Unity environment env = UnityEnvironment(file_name=\"Banana.app\") # get the", "script train an agent to navigate (and collect bananas!) in", "0: print('\\rEpisode {}\\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window))) if np.mean(scores_window)>=14: print('\\nEnvironment solved", "device=device) def train(n_episodes=2000, eps_start=1.0, eps_end=0.05, eps_decay=0.99): \"\"\"Deep Q-Learning. Params ======", "Windows (x86): \"path/to/Banana_Windows_x86/Banana.exe\" Windows (x86_64): \"path/to/Banana_Windows_x86_64/Banana.exe\" Linux (x86): \"path/to/Banana_Linux/Banana.x86\" Linux", "Project for Udacity Danaodgree in Deep Reinforcement Learning This script", "in {:d} episodes!\\tAverage Score: {:.2f}'.format(i_episode-100, np.mean(scores_window))) torch.save(agent.qnetwork_local.state_dict(), 'checkpoint.pth') break return", "\"path/to/Banana_Linux/Banana.x86_64\" Linux (x86, headless): \"path/to/Banana_Linux_NoVis/Banana.x86\" Linux (x86_64, headless): \"path/to/Banana_Linux_NoVis/Banana.x86_64\" \"\"\"", "Given this information, the agent has to learn how to", "eps = eps_start # initialize epsilon for i_episode in range(1,", "(x86): \"path/to/Banana_Linux/Banana.x86\" Linux (x86_64): \"path/to/Banana_Linux/Banana.x86_64\" Linux (x86, headless): \"path/to/Banana_Linux_NoVis/Banana.x86\" Linux", "initial state state = env_info.vector_observations[0] # set initial score score", "+13 over 100 consecutive episodes. 
\"\"\" from unityagents import UnityEnvironment", "+= reward if done: break scores_window.append(score) # save most recent", "device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\") \"\"\" Unity environment", "environment, your agent must get an average score of +13", "- turn left. 3 - turn right. The task is", "(float): minimum value of epsilon eps_decay (float): multiplicative factor (per", "while avoiding blue bananas. The state space has 37 dimensions", "a reward of -1 is provided for collecting a blue", "score score = 0 while True: action = agent.act(state, eps)", "for collecting a blue banana. Thus, the goal of your", "is provided for collecting a blue banana. Thus, the goal", "1 - move backward. 2 - turn left. 3 -", "env_info = env.reset(train_mode=False)[brain_name] action_size = brain.vector_action_space_size state_size = len(env_info.vector_observations[0]) #", "a blue banana. Thus, the goal of your agent is", "collecting a yellow banana, and a reward of -1 is", "yellow banana, and a reward of -1 is provided for", "is episodic, and in order to solve the environment, your", "Danaodgree in Deep Reinforcement Learning This script train an agent", "your agent must get an average score of +13 over", "average score of +13 over 100 consecutive episodes. \"\"\" from", "from collections import deque from dqn_agent import Agent import torch", "torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\") \"\"\" Unity environment configuration Mac:", "= env.step(action)[brain_name] next_state, reward, done = env_info.vector_observations[0], env_info.rewards[0], env_info.local_done[0] agent.step(state,", "# get the default brain brain_name = env.brain_names[0] brain =", "bananas!) in a large, square world. A reward of +1", "next_state score += reward if done: break scores_window.append(score) # save", "{}\\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window))) if np.mean(scores_window)>=14: print('\\nEnvironment solved in {:d}", "get the default brain brain_name = env.brain_names[0] brain = env.brains[brain_name]", "state space has 37 dimensions and contains the agent's velocity,", "= torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\") \"\"\" Unity environment configuration", "env_info = env.step(action)[brain_name] next_state, reward, done = env_info.vector_observations[0], env_info.rewards[0], env_info.local_done[0]", "env = UnityEnvironment(file_name=\"Banana.app\") # get the default brain brain_name =", "seed=0, device=device) def train(n_episodes=2000, eps_start=1.0, eps_end=0.05, eps_decay=0.99): \"\"\"Deep Q-Learning. Params", "to best select actions. 
Four discrete actions are available, corresponding", "get an average score of +13 over 100 consecutive episodes.", "Windows (x86_64): \"path/to/Banana_Windows_x86_64/Banana.exe\" Linux (x86): \"path/to/Banana_Linux/Banana.x86\" Linux (x86_64): \"path/to/Banana_Linux/Banana.x86_64\" Linux", "minimum value of epsilon eps_decay (float): multiplicative factor (per episode)", "for collecting a yellow banana, and a reward of -1", "else \"cpu\") \"\"\" Unity environment configuration Mac: \"path/to/Banana.app\" Windows (x86):", "import deque from dqn_agent import Agent import torch device =", "(float): multiplicative factor (per episode) for decreasing epsilon \"\"\" scores", "number of training episodes eps_start (float): starting value of epsilon,", "Unity environment env = UnityEnvironment(file_name=\"Banana.app\") # get the default brain", "0 while True: action = agent.act(state, eps) env_info = env.step(action)[brain_name]", "eps_end (float): minimum value of epsilon eps_decay (float): multiplicative factor", "discrete actions are available, corresponding to: 0 - move forward.", "np.mean(scores_window)>=14: print('\\nEnvironment solved in {:d} episodes!\\tAverage Score: {:.2f}'.format(i_episode-100, np.mean(scores_window))) torch.save(agent.qnetwork_local.state_dict(),", "initial score score = 0 while True: action = agent.act(state,", "# get initial state state = env_info.vector_observations[0] # set initial", "list containing scores from each episode scores_window = deque(maxlen=100) #", "env_info.rewards[0], env_info.local_done[0] agent.step(state, action, reward, next_state, done) state = next_state", "break scores_window.append(score) # save most recent score scores.append(score) # save", "Deep Reinforcement Learning This script train an agent to navigate", "configuration Mac: \"path/to/Banana.app\" Windows (x86): \"path/to/Banana_Windows_x86/Banana.exe\" Windows (x86_64): \"path/to/Banana_Windows_x86_64/Banana.exe\" Linux", "scores_window = deque(maxlen=100) # last 100 scores eps = eps_start", "eps_end=0.05, eps_decay=0.99): \"\"\"Deep Q-Learning. Params ====== n_episodes (int): maximum number", "bananas. The state space has 37 dimensions and contains the", "\"cpu\") \"\"\" Unity environment configuration Mac: \"path/to/Banana.app\" Windows (x86): \"path/to/Banana_Windows_x86/Banana.exe\"", "# set initial score score = 0 while True: action", "actions are available, corresponding to: 0 - move forward. 1", "-1 is provided for collecting a blue banana. Thus, the", "corresponding to: 0 - move forward. 1 - move backward.", "brain brain_name = env.brain_names[0] brain = env.brains[brain_name] env_info = env.reset(train_mode=False)[brain_name]", "A reward of +1 is provided for collecting a yellow", "initialize epsilon for i_episode in range(1, n_episodes+1): # reset environment", "episodes eps_start (float): starting value of epsilon, for epsilon-greedy action", "agent has to learn how to best select actions. 
Four", "Params ====== n_episodes (int): maximum number of training episodes eps_start", "of epsilon, for epsilon-greedy action selection eps_end (float): minimum value", "start Unity environment env = UnityEnvironment(file_name=\"Banana.app\") # get the default", "Score: {:.2f}'.format(i_episode, np.mean(scores_window))) if np.mean(scores_window)>=14: print('\\nEnvironment solved in {:d} episodes!\\tAverage", "\"\"\" # start Unity environment env = UnityEnvironment(file_name=\"Banana.app\") # get", "n_episodes (int): maximum number of training episodes eps_start (float): starting", "{:d} episodes!\\tAverage Score: {:.2f}'.format(i_episode-100, np.mean(scores_window))) torch.save(agent.qnetwork_local.state_dict(), 'checkpoint.pth') break return scores", "an average score of +13 over 100 consecutive episodes. \"\"\"", "the default brain brain_name = env.brain_names[0] brain = env.brains[brain_name] env_info", "\"path/to/Banana_Windows_x86/Banana.exe\" Windows (x86_64): \"path/to/Banana_Windows_x86_64/Banana.exe\" Linux (x86): \"path/to/Banana_Linux/Banana.x86\" Linux (x86_64): \"path/to/Banana_Linux/Banana.x86_64\"", "agent.step(state, action, reward, next_state, done) state = next_state score +=", "% 100 == 0: print('\\rEpisode {}\\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window))) if", "\"\"\" from unityagents import UnityEnvironment import numpy as np from", "# list containing scores from each episode scores_window = deque(maxlen=100)", "- move backward. 2 - turn left. 3 - turn", "\"\"\" Unity environment configuration Mac: \"path/to/Banana.app\" Windows (x86): \"path/to/Banana_Windows_x86/Banana.exe\" Windows", "scores.append(score) # save most recent score eps = max(eps_end, eps_decay*eps)", "of +13 over 100 consecutive episodes. \"\"\" from unityagents import", "along with ray-based perception of objects around the agent's forward", "in a large, square world. A reward of +1 is", "brain = env.brains[brain_name] env_info = env.reset(train_mode=False)[brain_name] action_size = brain.vector_action_space_size state_size", "as possible while avoiding blue bananas. The state space has", "turn left. 3 - turn right. The task is episodic,", "scores_window.append(score) # save most recent score scores.append(score) # save most", "environment env_info = env.reset(train_mode=True)[brain_name] # get initial state state =", "print('\\nEnvironment solved in {:d} episodes!\\tAverage Score: {:.2f}'.format(i_episode-100, np.mean(scores_window))) torch.save(agent.qnetwork_local.state_dict(), 'checkpoint.pth')", "while True: action = agent.act(state, eps) env_info = env.step(action)[brain_name] next_state,", "reset environment env_info = env.reset(train_mode=True)[brain_name] # get initial state state", "eps) env_info = env.step(action)[brain_name] next_state, reward, done = env_info.vector_observations[0], env_info.rewards[0],", "\"\"\"Deep Q-Learning. Params ====== n_episodes (int): maximum number of training", "= env.reset(train_mode=True)[brain_name] # get initial state state = env_info.vector_observations[0] #", "100 == 0: print('\\rEpisode {}\\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window))) if np.mean(scores_window)>=14:", "turn right. 
The task is episodic, and in order to", "== 0: print('\\rEpisode {}\\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window))) if np.mean(scores_window)>=14: print('\\nEnvironment", "(float): starting value of epsilon, for epsilon-greedy action selection eps_end", "UnityEnvironment(file_name=\"Banana.app\") # get the default brain brain_name = env.brain_names[0] brain", "\"path/to/Banana_Windows_x86_64/Banana.exe\" Linux (x86): \"path/to/Banana_Linux/Banana.x86\" Linux (x86_64): \"path/to/Banana_Linux/Banana.x86_64\" Linux (x86, headless):", "environment env = UnityEnvironment(file_name=\"Banana.app\") # get the default brain brain_name", "direction. Given this information, the agent has to learn how", "def train(n_episodes=2000, eps_start=1.0, eps_end=0.05, eps_decay=0.99): \"\"\"Deep Q-Learning. Params ====== n_episodes", "for decreasing epsilon \"\"\" scores = [] # list containing", "select actions. Four discrete actions are available, corresponding to: 0", "max(eps_end, eps_decay*eps) # decrease epsilon print('\\rEpisode {}\\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window)),", "headless): \"path/to/Banana_Linux_NoVis/Banana.x86_64\" \"\"\" # start Unity environment env = UnityEnvironment(file_name=\"Banana.app\")", "score scores.append(score) # save most recent score eps = max(eps_end,", "action selection eps_end (float): minimum value of epsilon eps_decay (float):", "containing scores from each episode scores_window = deque(maxlen=100) # last", "(x86_64): \"path/to/Banana_Linux/Banana.x86_64\" Linux (x86, headless): \"path/to/Banana_Linux_NoVis/Banana.x86\" Linux (x86_64, headless): \"path/to/Banana_Linux_NoVis/Banana.x86_64\"", "save most recent score scores.append(score) # save most recent score", "(x86): \"path/to/Banana_Windows_x86/Banana.exe\" Windows (x86_64): \"path/to/Banana_Windows_x86_64/Banana.exe\" Linux (x86): \"path/to/Banana_Linux/Banana.x86\" Linux (x86_64):", "blue bananas. The state space has 37 dimensions and contains", "Learning This script train an agent to navigate (and collect", "collect bananas!) in a large, square world. A reward of", "value of epsilon eps_decay (float): multiplicative factor (per episode) for", "done) state = next_state score += reward if done: break", "set initial score score = 0 while True: action =", "Agent(state_size=state_size, action_size=action_size, seed=0, device=device) def train(n_episodes=2000, eps_start=1.0, eps_end=0.05, eps_decay=0.99): \"\"\"Deep", "This script train an agent to navigate (and collect bananas!)", "action_size=action_size, seed=0, device=device) def train(n_episodes=2000, eps_start=1.0, eps_end=0.05, eps_decay=0.99): \"\"\"Deep Q-Learning.", "the environment, your agent must get an average score of", "np.mean(scores_window))) if np.mean(scores_window)>=14: print('\\nEnvironment solved in {:d} episodes!\\tAverage Score: {:.2f}'.format(i_episode-100,", "import Agent import torch device = torch.device(\"cuda:0\" if torch.cuda.is_available() else", "action, reward, next_state, done) state = next_state score += reward", "the agent has to learn how to best select actions.", "possible while avoiding blue bananas. The state space has 37", "epsilon eps_decay (float): multiplicative factor (per episode) for decreasing epsilon", "epsilon for i_episode in range(1, n_episodes+1): # reset environment env_info", "# decrease epsilon print('\\rEpisode {}\\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window)), end=\"\") if", "to navigate (and collect bananas!) 
in a large, square world.", "blue banana. Thus, the goal of your agent is to", "\"path/to/Banana_Linux_NoVis/Banana.x86\" Linux (x86_64, headless): \"path/to/Banana_Linux_NoVis/Banana.x86_64\" \"\"\" # start Unity environment", "score of +13 over 100 consecutive episodes. \"\"\" from unityagents", "scores = [] # list containing scores from each episode", "starting value of epsilon, for epsilon-greedy action selection eps_end (float):", "# last 100 scores eps = eps_start # initialize epsilon", "in Deep Reinforcement Learning This script train an agent to", "for Udacity Danaodgree in Deep Reinforcement Learning This script train", "decrease epsilon print('\\rEpisode {}\\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window)), end=\"\") if i_episode", "Four discrete actions are available, corresponding to: 0 - move", "= 0 while True: action = agent.act(state, eps) env_info =", "order to solve the environment, your agent must get an", "forward direction. Given this information, the agent has to learn", "epsilon, for epsilon-greedy action selection eps_end (float): minimum value of", "(x86_64, headless): \"path/to/Banana_Linux_NoVis/Banana.x86_64\" \"\"\" # start Unity environment env =", "task is episodic, and in order to solve the environment,", "banana, and a reward of -1 is provided for collecting", "episode) for decreasing epsilon \"\"\" scores = [] # list", "provided for collecting a yellow banana, and a reward of", "env.reset(train_mode=False)[brain_name] action_size = brain.vector_action_space_size state_size = len(env_info.vector_observations[0]) # initialize agent", "of objects around the agent's forward direction. Given this information,", "agent's forward direction. Given this information, the agent has to", "(x86, headless): \"path/to/Banana_Linux_NoVis/Banana.x86\" Linux (x86_64, headless): \"path/to/Banana_Linux_NoVis/Banana.x86_64\" \"\"\" # start", "Linux (x86, headless): \"path/to/Banana_Linux_NoVis/Banana.x86\" Linux (x86_64, headless): \"path/to/Banana_Linux_NoVis/Banana.x86_64\" \"\"\" #", "navigate (and collect bananas!) in a large, square world. A", "np from collections import deque from dqn_agent import Agent import", "UnityEnvironment import numpy as np from collections import deque from", "Linux (x86_64): \"path/to/Banana_Linux/Banana.x86_64\" Linux (x86, headless): \"path/to/Banana_Linux_NoVis/Banana.x86\" Linux (x86_64, headless):", "i_episode in range(1, n_episodes+1): # reset environment env_info = env.reset(train_mode=True)[brain_name]", "right. The task is episodic, and in order to solve", "dqn_agent import Agent import torch device = torch.device(\"cuda:0\" if torch.cuda.is_available()", "headless): \"path/to/Banana_Linux_NoVis/Banana.x86\" Linux (x86_64, headless): \"path/to/Banana_Linux_NoVis/Banana.x86_64\" \"\"\" # start Unity", "# initialize agent agent = Agent(state_size=state_size, action_size=action_size, seed=0, device=device) def", "bananas as possible while avoiding blue bananas. 
"""
Udacity Nanodegree in Deep Reinforcement Learning.

This script trains an agent to navigate (and collect bananas!) in a large,
square world. A reward of +1 is provided for collecting a yellow banana, and
a reward of -1 is provided for collecting a blue banana. Thus, the goal of
your agent is to collect as many yellow bananas as possible while avoiding
blue bananas.

The state space has 37 dimensions and contains the agent's velocity, along
with ray-based perception of objects around the agent's forward direction.
Given this information, the agent has to learn how to best select actions.
Four discrete actions are available, corresponding to:
    0 - move forward.
    1 - move backward.
    2 - turn left.
    3 - turn right.

The task is episodic, and in order to solve the environment, your agent must
get an average score of +13 over 100 consecutive episodes.
"""
from unityagents import UnityEnvironment
import numpy as np
from collections import deque
from dqn_agent import Agent
import torch

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# Unity environment configuration -- pick the binary for your platform:
#   Mac:                      "path/to/Banana.app"
#   Windows (x86):            "path/to/Banana_Windows_x86/Banana.exe"
#   Windows (x86_64):         "path/to/Banana_Windows_x86_64/Banana.exe"
#   Linux (x86):              "path/to/Banana_Linux/Banana.x86"
#   Linux (x86_64):           "path/to/Banana_Linux/Banana.x86_64"
#   Linux (x86, headless):    "path/to/Banana_Linux_NoVis/Banana.x86"
env = UnityEnvironment(file_name="path/to/Banana_Linux/Banana.x86_64")

brain_name = env.brain_names[0]
brain = env.brains[brain_name]
env_info = env.reset(train_mode=False)[brain_name]
action_size = brain.vector_action_space_size
state_size = len(env_info.vector_observations[0])

# initialize agent
agent = Agent(state_size=state_size, action_size=action_size, seed=0, device=device)


def train(n_episodes=2000, eps_start=1.0, eps_end=0.05, eps_decay=0.99):
    """Deep Q-Learning.

    Params
    ======
        n_episodes (int): maximum number of training episodes
        eps_start (float): starting value of epsilon, for epsilon-greedy action selection
        eps_end (float): minimum value of epsilon
        eps_decay (float): multiplicative factor (per episode) for decreasing epsilon
    """
    scores = []                        # list containing scores from each episode
    scores_window = deque(maxlen=100)  # last 100 scores
    eps = eps_start                    # initialize epsilon
    for i_episode in range(1, n_episodes + 1):
        # reset environment
        env_info = env.reset(train_mode=True)[brain_name]
        # get initial state
        state = env_info.vector_observations[0]
        # set initial score
        score = 0
        while True:
            action = agent.act(state, eps)
            env_info = env.step(action)[brain_name]
            next_state, reward, done = env_info.vector_observations[0], env_info.rewards[0], env_info.local_done[0]
            agent.step(state, action, reward, next_state, done)
            state = next_state
            score += reward
            if done:
                break
        scores_window.append(score)          # save most recent score
        scores.append(score)                 # save most recent score
        eps = max(eps_end, eps_decay * eps)  # decrease epsilon
        print('\rEpisode {}\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window)), end="")
        if i_episode % 100 == 0:
            print('\rEpisode {}\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window)))
        if np.mean(scores_window) >= 14:
            print('\nEnvironment solved in {:d} episodes!\tAverage Score: {:.2f}'.format(
                i_episode - 100, np.mean(scores_window)))
            break
    return scores
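A minimal driver sketch for the script above may help: train() accumulates and returns the per-episode scores, so the natural follow-up is to plot the learning curve and shut the Unity environment down. The plotting step, the output filename, and the env.close() call are additions for illustration, not part of the recovered script.

    # Driver sketch, assuming the train() function and env defined above.
    import matplotlib.pyplot as plt

    scores = train(n_episodes=2000)        # per-episode scores

    # Plot the learning curve: episode index vs. raw episode score.
    plt.plot(range(len(scores)), scores)
    plt.xlabel('Episode #')
    plt.ylabel('Score')
    plt.savefig('scores.png')              # hypothetical output path

    env.close()                            # shut down the Unity environment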
[ "output_dim=model_params.output_dim, planes=model_params.planes, layers=model_params.layers, num_top_down=model_params.num_top_down, conv0_kernel_size=model_params.conv0_kernel_size, block=model_params.block, pooling_method=model_params.pooling) elif model_params.model ==", "= False local_normalize = True if model_name == 'egonn': #", "of Technology from layers.eca_block import ECABasicBlock from models.minkgl import MinkHead,", "# Warsaw University of Technology from layers.eca_block import ECABasicBlock from", "models.minkloc import MinkLoc from third_party.minkloc3d.minkloc import MinkLoc3D from misc.utils import", "number of channels for level 1 and above global_in_channels =", "{}'.format(model_params.model)) return model def create_egonn_model(model_params: ModelParams): model_name = model_params.model global_normalize", "conv0_kernel_size=model_params.conv0_kernel_size, block=model_params.block, pooling_method=model_params.pooling) elif model_params.model == 'MinkLoc3D': model = MinkLoc3D()", "elif 'egonn' in model_params.model: model = create_egonn_model(model_params) else: raise NotImplementedError('Model", "len(planes) if len(global_in_levels) > 0: min_out_level = min(min_out_level, min(global_in_levels)) if", "= True if model_name == 'egonn': # THIS IS OUR", "i in local_in_levels] head_local = MinkHead(local_in_levels, local_in_channels, local_map_channels) else: head_local", "min_out_level = min(min_out_level, min(local_in_levels)) trunk = MinkTrunk(in_channels=1, planes=planes, layers=layers, conv0_kernel_size=5,", "layers=model_params.layers, num_top_down=model_params.num_top_down, conv0_kernel_size=model_params.conv0_kernel_size, block=model_params.block, pooling_method=model_params.pooling) elif model_params.model == 'MinkLoc3D': model", "THIS IS OUR BEST MODEL block = ECABasicBlock planes =", "= MinkHead(global_in_levels, global_in_channels, global_map_channels) if len(local_in_levels) > 0: local_in_channels =", "> 0: min_out_level = min(min_out_level, min(local_in_levels)) trunk = MinkTrunk(in_channels=1, planes=planes,", "planes = [32, 64, 64, 128, 128, 128, 128] layers", "return model def create_egonn_model(model_params: ModelParams): model_name = model_params.model global_normalize =", "256 local_in_levels = [3, 4] local_map_channels = 64 local_descriptor_size =", "MinkLoc3D from misc.utils import ModelParams def model_factory(model_params: ModelParams): in_channels =", "IS OUR BEST MODEL block = ECABasicBlock planes = [32,", "128, 128, 128] layers = [1, 1, 1, 1, 1,", "layers.eca_block import ECABasicBlock from models.minkgl import MinkHead, MinkTrunk, MinkGL from", "model = MinkLoc(in_channels=in_channels, feature_size=model_params.feature_size, output_dim=model_params.output_dim, planes=model_params.planes, layers=model_params.layers, num_top_down=model_params.num_top_down, conv0_kernel_size=model_params.conv0_kernel_size, block=model_params.block,", "len(local_in_levels) > 0: min_out_level = min(min_out_level, min(local_in_levels)) trunk = MinkTrunk(in_channels=1,", "misc.utils import ModelParams def model_factory(model_params: ModelParams): in_channels = 1 if", "[1, 1, 1, 1, 1, 1, 1] global_in_levels = [5,", "import MinkHead, MinkTrunk, MinkGL from models.minkloc import MinkLoc from third_party.minkloc3d.minkloc", "model_params.model: model = create_egonn_model(model_params) else: raise NotImplementedError('Model not implemented: {}'.format(model_params.model))", "1, 1, 1, 1, 1] global_in_levels = [5, 6, 7]", "global_map_channels) if len(local_in_levels) > 0: 
local_in_channels = [planes[i-1] for i", "model_name = model_params.model global_normalize = False local_normalize = True if", "MinkTrunk(in_channels=1, planes=planes, layers=layers, conv0_kernel_size=5, block=block, min_out_level=min_out_level) net = MinkGL(trunk, local_head=head_local,", "= [planes[i-1] for i in local_in_levels] head_local = MinkHead(local_in_levels, local_in_channels,", "= create_egonn_model(model_params) else: raise NotImplementedError('Model not implemented: {}'.format(model_params.model)) return model", "feature_size=model_params.feature_size, output_dim=model_params.output_dim, planes=model_params.planes, layers=model_params.layers, num_top_down=model_params.num_top_down, conv0_kernel_size=model_params.conv0_kernel_size, block=model_params.block, pooling_method=model_params.pooling) elif model_params.model", "min(min_out_level, min(global_in_levels)) if len(local_in_levels) > 0: min_out_level = min(min_out_level, min(local_in_levels))", "global_in_levels = [5, 6, 7] global_map_channels = 128 global_descriptor_size =", "planes=model_params.planes, layers=model_params.layers, num_top_down=model_params.num_top_down, conv0_kernel_size=model_params.conv0_kernel_size, block=model_params.block, pooling_method=model_params.pooling) elif model_params.model == 'MinkLoc3D':", "else: raise NotImplementedError('Model not implemented: {}'.format(model_params.model)) return model def create_egonn_model(model_params:", "NotImplementedError(f'Unknown model: {model_name}') # Planes list number of channels for", "{model_name}') # Planes list number of channels for level 1", "if len(local_in_levels) > 0: local_in_channels = [planes[i-1] for i in", "University of Technology from layers.eca_block import ECABasicBlock from models.minkgl import", "7] global_map_channels = 128 global_descriptor_size = 256 local_in_levels = [3,", "models.minkgl import MinkHead, MinkTrunk, MinkGL from models.minkloc import MinkLoc from", "in local_in_levels] head_local = MinkHead(local_in_levels, local_in_channels, local_map_channels) else: head_local =", "def model_factory(model_params: ModelParams): in_channels = 1 if model_params.model == 'MinkLoc':", "not implemented: {}'.format(model_params.model)) return model def create_egonn_model(model_params: ModelParams): model_name =", "global_in_channels, global_map_channels) if len(local_in_levels) > 0: local_in_channels = [planes[i-1] for", "net = MinkGL(trunk, local_head=head_local, local_descriptor_size=local_descriptor_size, local_normalize=local_normalize, global_head=head_global, global_descriptor_size=global_descriptor_size, global_pool_method='GeM', global_normalize=global_normalize,", "import ModelParams def model_factory(model_params: ModelParams): in_channels = 1 if model_params.model", "Planes list number of channels for level 1 and above", "MODEL block = ECABasicBlock planes = [32, 64, 64, 128,", "model = create_egonn_model(model_params) else: raise NotImplementedError('Model not implemented: {}'.format(model_params.model)) return", "4] local_map_channels = 64 local_descriptor_size = 128 else: raise NotImplementedError(f'Unknown", "'MinkLoc3D': model = MinkLoc3D() elif 'egonn' in model_params.model: model =", "create_egonn_model(model_params: ModelParams): model_name = model_params.model global_normalize = False local_normalize =", "1, 1, 1, 1] global_in_levels = [5, 6, 7] global_map_channels", "Technology from layers.eca_block import ECABasicBlock from models.minkgl import MinkHead, MinkTrunk,", "= min(min_out_level, min(local_in_levels)) trunk = 
MinkTrunk(in_channels=1, planes=planes, layers=layers, conv0_kernel_size=5, block=block,", "from misc.utils import ModelParams def model_factory(model_params: ModelParams): in_channels = 1", "> 0: min_out_level = min(min_out_level, min(global_in_levels)) if len(local_in_levels) > 0:", "= MinkGL(trunk, local_head=head_local, local_descriptor_size=local_descriptor_size, local_normalize=local_normalize, global_head=head_global, global_descriptor_size=global_descriptor_size, global_pool_method='GeM', global_normalize=global_normalize, quantizer=model_params.quantizer)", "min(min_out_level, min(local_in_levels)) trunk = MinkTrunk(in_channels=1, planes=planes, layers=layers, conv0_kernel_size=5, block=block, min_out_level=min_out_level)", "= [planes[i-1] for i in global_in_levels] head_global = MinkHead(global_in_levels, global_in_channels,", "= MinkLoc(in_channels=in_channels, feature_size=model_params.feature_size, output_dim=model_params.output_dim, planes=model_params.planes, layers=model_params.layers, num_top_down=model_params.num_top_down, conv0_kernel_size=model_params.conv0_kernel_size, block=model_params.block, pooling_method=model_params.pooling)", "1] global_in_levels = [5, 6, 7] global_map_channels = 128 global_descriptor_size", "if model_params.model == 'MinkLoc': model = MinkLoc(in_channels=in_channels, feature_size=model_params.feature_size, output_dim=model_params.output_dim, planes=model_params.planes,", "0: min_out_level = min(min_out_level, min(global_in_levels)) if len(local_in_levels) > 0: min_out_level", "== 'MinkLoc': model = MinkLoc(in_channels=in_channels, feature_size=model_params.feature_size, output_dim=model_params.output_dim, planes=model_params.planes, layers=model_params.layers, num_top_down=model_params.num_top_down,", "BEST MODEL block = ECABasicBlock planes = [32, 64, 64,", "min(global_in_levels)) if len(local_in_levels) > 0: min_out_level = min(min_out_level, min(local_in_levels)) trunk", "conv0_kernel_size=5, block=block, min_out_level=min_out_level) net = MinkGL(trunk, local_head=head_local, local_descriptor_size=local_descriptor_size, local_normalize=local_normalize, global_head=head_global,", "pooling_method=model_params.pooling) elif model_params.model == 'MinkLoc3D': model = MinkLoc3D() elif 'egonn'", "min(local_in_levels)) trunk = MinkTrunk(in_channels=1, planes=planes, layers=layers, conv0_kernel_size=5, block=block, min_out_level=min_out_level) net", "model: {model_name}') # Planes list number of channels for level", "above global_in_channels = [planes[i-1] for i in global_in_levels] head_global =", "128] layers = [1, 1, 1, 1, 1, 1, 1]", "local_head=head_local, local_descriptor_size=local_descriptor_size, local_normalize=local_normalize, global_head=head_global, global_descriptor_size=global_descriptor_size, global_pool_method='GeM', global_normalize=global_normalize, quantizer=model_params.quantizer) return net", "'egonn' in model_params.model: model = create_egonn_model(model_params) else: raise NotImplementedError('Model not", "= [32, 64, 64, 128, 128, 128, 128] layers =", "MinkTrunk, MinkGL from models.minkloc import MinkLoc from third_party.minkloc3d.minkloc import MinkLoc3D", "block = ECABasicBlock planes = [32, 64, 64, 128, 128,", "global_normalize = False local_normalize = True if model_name == 'egonn':", "MinkHead(local_in_levels, local_in_channels, local_map_channels) else: head_local = None min_out_level = len(planes)", "else: head_local = None min_out_level = len(planes) if len(global_in_levels) >", "MinkGL from models.minkloc import 
MinkLoc from third_party.minkloc3d.minkloc import MinkLoc3D from", "128 else: raise NotImplementedError(f'Unknown model: {model_name}') # Planes list number", "import MinkLoc from third_party.minkloc3d.minkloc import MinkLoc3D from misc.utils import ModelParams", "elif model_params.model == 'MinkLoc3D': model = MinkLoc3D() elif 'egonn' in", "head_global = MinkHead(global_in_levels, global_in_channels, global_map_channels) if len(local_in_levels) > 0: local_in_channels", "= len(planes) if len(global_in_levels) > 0: min_out_level = min(min_out_level, min(global_in_levels))", "128, 128, 128, 128] layers = [1, 1, 1, 1,", "1, 1, 1] global_in_levels = [5, 6, 7] global_map_channels =", "third_party.minkloc3d.minkloc import MinkLoc3D from misc.utils import ModelParams def model_factory(model_params: ModelParams):", "min_out_level = min(min_out_level, min(global_in_levels)) if len(local_in_levels) > 0: min_out_level =", "1 if model_params.model == 'MinkLoc': model = MinkLoc(in_channels=in_channels, feature_size=model_params.feature_size, output_dim=model_params.output_dim,", "'egonn': # THIS IS OUR BEST MODEL block = ECABasicBlock", "128 global_descriptor_size = 256 local_in_levels = [3, 4] local_map_channels =", "[planes[i-1] for i in local_in_levels] head_local = MinkHead(local_in_levels, local_in_channels, local_map_channels)", "from third_party.minkloc3d.minkloc import MinkLoc3D from misc.utils import ModelParams def model_factory(model_params:", "= 128 else: raise NotImplementedError(f'Unknown model: {model_name}') # Planes list", "layers = [1, 1, 1, 1, 1, 1, 1] global_in_levels", "from models.minkgl import MinkHead, MinkTrunk, MinkGL from models.minkloc import MinkLoc", "level 1 and above global_in_channels = [planes[i-1] for i in", "= 256 local_in_levels = [3, 4] local_map_channels = 64 local_descriptor_size", "model = MinkLoc3D() elif 'egonn' in model_params.model: model = create_egonn_model(model_params)", "for level 1 and above global_in_channels = [planes[i-1] for i", "list number of channels for level 1 and above global_in_channels", "= [5, 6, 7] global_map_channels = 128 global_descriptor_size = 256", "local_normalize = True if model_name == 'egonn': # THIS IS", "import ECABasicBlock from models.minkgl import MinkHead, MinkTrunk, MinkGL from models.minkloc", "1, 1] global_in_levels = [5, 6, 7] global_map_channels = 128", "[3, 4] local_map_channels = 64 local_descriptor_size = 128 else: raise", "MinkLoc(in_channels=in_channels, feature_size=model_params.feature_size, output_dim=model_params.output_dim, planes=model_params.planes, layers=model_params.layers, num_top_down=model_params.num_top_down, conv0_kernel_size=model_params.conv0_kernel_size, block=model_params.block, pooling_method=model_params.pooling) elif", "True if model_name == 'egonn': # THIS IS OUR BEST", "64 local_descriptor_size = 128 else: raise NotImplementedError(f'Unknown model: {model_name}') #", "= MinkHead(local_in_levels, local_in_channels, local_map_channels) else: head_local = None min_out_level =", "from models.minkloc import MinkLoc from third_party.minkloc3d.minkloc import MinkLoc3D from misc.utils", "MinkLoc3D() elif 'egonn' in model_params.model: model = create_egonn_model(model_params) else: raise", "64, 64, 128, 128, 128, 128] layers = [1, 1,", "= 128 global_descriptor_size = 256 local_in_levels = [3, 4] local_map_channels", "64, 128, 128, 128, 128] layers = [1, 1, 1,", "local_in_levels = [3, 4] local_map_channels = 64 local_descriptor_size = 128", "head_local = MinkHead(local_in_levels, local_in_channels, 
local_map_channels) else: head_local = None min_out_level", "global_in_levels] head_global = MinkHead(global_in_levels, global_in_channels, global_map_channels) if len(local_in_levels) > 0:", "== 'egonn': # THIS IS OUR BEST MODEL block =", "and above global_in_channels = [planes[i-1] for i in global_in_levels] head_global", "ECABasicBlock planes = [32, 64, 64, 128, 128, 128, 128]", "= None min_out_level = len(planes) if len(global_in_levels) > 0: min_out_level", "else: raise NotImplementedError(f'Unknown model: {model_name}') # Planes list number of", "0: local_in_channels = [planes[i-1] for i in local_in_levels] head_local =", "create_egonn_model(model_params) else: raise NotImplementedError('Model not implemented: {}'.format(model_params.model)) return model def", "[planes[i-1] for i in global_in_levels] head_global = MinkHead(global_in_levels, global_in_channels, global_map_channels)", "head_local = None min_out_level = len(planes) if len(global_in_levels) > 0:", "None min_out_level = len(planes) if len(global_in_levels) > 0: min_out_level =", "= MinkTrunk(in_channels=1, planes=planes, layers=layers, conv0_kernel_size=5, block=block, min_out_level=min_out_level) net = MinkGL(trunk,", "OUR BEST MODEL block = ECABasicBlock planes = [32, 64,", "layers=layers, conv0_kernel_size=5, block=block, min_out_level=min_out_level) net = MinkGL(trunk, local_head=head_local, local_descriptor_size=local_descriptor_size, local_normalize=local_normalize,", "Warsaw University of Technology from layers.eca_block import ECABasicBlock from models.minkgl", "def create_egonn_model(model_params: ModelParams): model_name = model_params.model global_normalize = False local_normalize", "channels for level 1 and above global_in_channels = [planes[i-1] for", "== 'MinkLoc3D': model = MinkLoc3D() elif 'egonn' in model_params.model: model", "model_name == 'egonn': # THIS IS OUR BEST MODEL block", "1, 1, 1, 1, 1, 1] global_in_levels = [5, 6,", "global_map_channels = 128 global_descriptor_size = 256 local_in_levels = [3, 4]", "False local_normalize = True if model_name == 'egonn': # THIS", "[32, 64, 64, 128, 128, 128, 128] layers = [1,", "in model_params.model: model = create_egonn_model(model_params) else: raise NotImplementedError('Model not implemented:", "if len(global_in_levels) > 0: min_out_level = min(min_out_level, min(global_in_levels)) if len(local_in_levels)", "model_factory(model_params: ModelParams): in_channels = 1 if model_params.model == 'MinkLoc': model", "local_map_channels) else: head_local = None min_out_level = len(planes) if len(global_in_levels)", "MinkLoc from third_party.minkloc3d.minkloc import MinkLoc3D from misc.utils import ModelParams def", "ModelParams def model_factory(model_params: ModelParams): in_channels = 1 if model_params.model ==", "ModelParams): model_name = model_params.model global_normalize = False local_normalize = True", "= ECABasicBlock planes = [32, 64, 64, 128, 128, 128,", "1 and above global_in_channels = [planes[i-1] for i in global_in_levels]", "raise NotImplementedError('Model not implemented: {}'.format(model_params.model)) return model def create_egonn_model(model_params: ModelParams):", "local_descriptor_size = 128 else: raise NotImplementedError(f'Unknown model: {model_name}') # Planes", "= min(min_out_level, min(global_in_levels)) if len(local_in_levels) > 0: min_out_level = min(min_out_level,", "block=block, min_out_level=min_out_level) net = MinkGL(trunk, local_head=head_local, local_descriptor_size=local_descriptor_size, local_normalize=local_normalize, 
global_head=head_global, global_descriptor_size=global_descriptor_size,", "# THIS IS OUR BEST MODEL block = ECABasicBlock planes", "local_map_channels = 64 local_descriptor_size = 128 else: raise NotImplementedError(f'Unknown model:", "implemented: {}'.format(model_params.model)) return model def create_egonn_model(model_params: ModelParams): model_name = model_params.model", "MinkHead, MinkTrunk, MinkGL from models.minkloc import MinkLoc from third_party.minkloc3d.minkloc import", "for i in local_in_levels] head_local = MinkHead(local_in_levels, local_in_channels, local_map_channels) else:", "num_top_down=model_params.num_top_down, conv0_kernel_size=model_params.conv0_kernel_size, block=model_params.block, pooling_method=model_params.pooling) elif model_params.model == 'MinkLoc3D': model =", "0: min_out_level = min(min_out_level, min(local_in_levels)) trunk = MinkTrunk(in_channels=1, planes=planes, layers=layers,", "ECABasicBlock from models.minkgl import MinkHead, MinkTrunk, MinkGL from models.minkloc import", "# Planes list number of channels for level 1 and", "global_in_channels = [planes[i-1] for i in global_in_levels] head_global = MinkHead(global_in_levels,", "i in global_in_levels] head_global = MinkHead(global_in_levels, global_in_channels, global_map_channels) if len(local_in_levels)", "in global_in_levels] head_global = MinkHead(global_in_levels, global_in_channels, global_map_channels) if len(local_in_levels) >", "local_in_channels, local_map_channels) else: head_local = None min_out_level = len(planes) if", "min_out_level = len(planes) if len(global_in_levels) > 0: min_out_level = min(min_out_level,", "trunk = MinkTrunk(in_channels=1, planes=planes, layers=layers, conv0_kernel_size=5, block=block, min_out_level=min_out_level) net =", "= [1, 1, 1, 1, 1, 1, 1] global_in_levels =", "planes=planes, layers=layers, conv0_kernel_size=5, block=block, min_out_level=min_out_level) net = MinkGL(trunk, local_head=head_local, local_descriptor_size=local_descriptor_size,", "MinkHead(global_in_levels, global_in_channels, global_map_channels) if len(local_in_levels) > 0: local_in_channels = [planes[i-1]", "model def create_egonn_model(model_params: ModelParams): model_name = model_params.model global_normalize = False", "from layers.eca_block import ECABasicBlock from models.minkgl import MinkHead, MinkTrunk, MinkGL", "= model_params.model global_normalize = False local_normalize = True if model_name", "if model_name == 'egonn': # THIS IS OUR BEST MODEL", "'MinkLoc': model = MinkLoc(in_channels=in_channels, feature_size=model_params.feature_size, output_dim=model_params.output_dim, planes=model_params.planes, layers=model_params.layers, num_top_down=model_params.num_top_down, conv0_kernel_size=model_params.conv0_kernel_size,", "[5, 6, 7] global_map_channels = 128 global_descriptor_size = 256 local_in_levels", "of channels for level 1 and above global_in_channels = [planes[i-1]", "> 0: local_in_channels = [planes[i-1] for i in local_in_levels] head_local", "local_in_channels = [planes[i-1] for i in local_in_levels] head_local = MinkHead(local_in_levels,", "import MinkLoc3D from misc.utils import ModelParams def model_factory(model_params: ModelParams): in_channels", "model_params.model == 'MinkLoc3D': model = MinkLoc3D() elif 'egonn' in model_params.model:", "for i in global_in_levels] head_global = MinkHead(global_in_levels, global_in_channels, global_map_channels) if", "= MinkLoc3D() elif 'egonn' in model_params.model: model = create_egonn_model(model_params) else:", 
"min_out_level=min_out_level) net = MinkGL(trunk, local_head=head_local, local_descriptor_size=local_descriptor_size, local_normalize=local_normalize, global_head=head_global, global_descriptor_size=global_descriptor_size, global_pool_method='GeM',", "model_params.model == 'MinkLoc': model = MinkLoc(in_channels=in_channels, feature_size=model_params.feature_size, output_dim=model_params.output_dim, planes=model_params.planes, layers=model_params.layers,", "len(local_in_levels) > 0: local_in_channels = [planes[i-1] for i in local_in_levels]", "= 64 local_descriptor_size = 128 else: raise NotImplementedError(f'Unknown model: {model_name}')", "= [3, 4] local_map_channels = 64 local_descriptor_size = 128 else:", "raise NotImplementedError(f'Unknown model: {model_name}') # Planes list number of channels", "MinkGL(trunk, local_head=head_local, local_descriptor_size=local_descriptor_size, local_normalize=local_normalize, global_head=head_global, global_descriptor_size=global_descriptor_size, global_pool_method='GeM', global_normalize=global_normalize, quantizer=model_params.quantizer) return", "global_descriptor_size = 256 local_in_levels = [3, 4] local_map_channels = 64", "ModelParams): in_channels = 1 if model_params.model == 'MinkLoc': model =", "= 1 if model_params.model == 'MinkLoc': model = MinkLoc(in_channels=in_channels, feature_size=model_params.feature_size,", "len(global_in_levels) > 0: min_out_level = min(min_out_level, min(global_in_levels)) if len(local_in_levels) >", "128, 128] layers = [1, 1, 1, 1, 1, 1,", "if len(local_in_levels) > 0: min_out_level = min(min_out_level, min(local_in_levels)) trunk =", "local_in_levels] head_local = MinkHead(local_in_levels, local_in_channels, local_map_channels) else: head_local = None", "block=model_params.block, pooling_method=model_params.pooling) elif model_params.model == 'MinkLoc3D': model = MinkLoc3D() elif", "model_params.model global_normalize = False local_normalize = True if model_name ==", "in_channels = 1 if model_params.model == 'MinkLoc': model = MinkLoc(in_channels=in_channels,", "6, 7] global_map_channels = 128 global_descriptor_size = 256 local_in_levels =", "NotImplementedError('Model not implemented: {}'.format(model_params.model)) return model def create_egonn_model(model_params: ModelParams): model_name" ]
[ "raise PhidgetException(result) return _DeviceSerialNumber.value def setDeviceSerialNumber(self, DeviceSerialNumber): _DeviceSerialNumber = ctypes.c_int32(DeviceSerialNumber)", "> 0: raise PhidgetException(result) return _DeviceLabel.value.decode('utf-8') def setDeviceLabel(self, DeviceLabel): _DeviceLabel", "RuntimeError: self._Attach = None self._onAttach = None def _localDetachEvent(self, handle,", "_DeviceLabel.value.decode('utf-8') def setDeviceLabel(self, DeviceLabel): _DeviceLabel = ctypes.create_string_buffer(DeviceLabel.encode('utf-8')) __func = PhidgetSupport.getDll().Phidget_setDeviceLabel", "ctypes.CFUNCTYPE(None, ctypes.c_void_p, ctypes.c_void_p) self._Attach = None self._onAttach = None if", "__func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_HubPortCount)) if result >", "PhidgetSupport.getDll().Phidget_open __func.restype = ctypes.c_int32 result = __func(self.handle) if result >", "= __func(ctypes.byref(_LibraryVersionNumber)) if result > 0: raise PhidgetException(result) return _LibraryVersionNumber.value.decode('utf-8')", "0: raise PhidgetException(result) def getAttached(self): _Attached = ctypes.c_int() __func =", "self._PropertyChange == None: return propertyName = propertyName.decode('utf-8') self._PropertyChange(self, propertyName) def", "= PhidgetSupport.getDll().Phidget_setIsHubPortDevice __func.restype = ctypes.c_int32 result = __func(self.handle, _IsHubPortDevice) if", "= PhidgetSupport.getDll().Phidget_setIsLocal __func.restype = ctypes.c_int32 result = __func(self.handle, _IsLocal) if", "def open(self): __func = PhidgetSupport.getDll().Phidget_open __func.restype = ctypes.c_int32 result =", "None) except RuntimeError: self._PropertyChange = None self._onPropertyChange = None @staticmethod", "ctypes.c_int32 result = __func(self.handle, ctypes.byref(_MaxHubPortSpeed)) if result > 0: raise", "result = __func(self.handle, ctypes.byref(_ChannelName)) if result > 0: raise PhidgetException(result)", "= ctypes.c_int32() __func = PhidgetSupport.getDll().Phidget_getDeviceSerialNumber __func.restype = ctypes.c_int32 result =", "PhidgetException(result) def openWaitForAttachment(self, timeout): _timeout = ctypes.c_uint32(timeout) __func = PhidgetSupport.getDll().Phidget_openWaitForAttachment", "PhidgetException(result) return _ChannelClass.value def getChannelClassName(self): _ChannelClassName = ctypes.c_char_p() __func =", "setOnErrorHandler(self, handler): if handler == None: self._Error = None self._onError", "= __func(ctypes.byref(self.handle)) self.handle = None if res > 0: raise", "ctypes.c_int32 result = __func(self.handle, ctypes.byref(_ChannelName)) if result > 0: raise", "None self._onError = None else: self._Error = handler self._onError =", "_HubPort = ctypes.c_int(HubPort) __func = PhidgetSupport.getDll().Phidget_setHubPort __func.restype = ctypes.c_int32 result", "getServerHostname(self): _ServerHostname = ctypes.c_char_p() __func = PhidgetSupport.getDll().Phidget_getServerHostname __func.restype = ctypes.c_int32", "raise PhidgetException(result) return _IsChannel.value def getIsHubPortDevice(self): _IsHubPortDevice = ctypes.c_int() __func", "== None: return Description = Description.decode('utf-8') self._Error(self, Code, Description) def", "__eq__(self, other): return hasattr(other, 'handle') and self.handle.value == other.handle.value def", "= ctypes.c_int(cls) _count = ctypes.c_uint32() __func = PhidgetSupport.getDll().Phidget_getDeviceChannelCount __func.restype =", "ctypes.c_void_p, ctypes.c_void_p) else: self._DetachFactory = 
ctypes.CFUNCTYPE(None, ctypes.c_void_p, ctypes.c_void_p) self._Detach =", "try: __func = PhidgetSupport.getDll().Phidget_setOnDetachHandler __func.restype = ctypes.c_int32 res = __func(self.handle,", "if result > 0: raise PhidgetException(result) return _IsHubPortDevice.value def setIsHubPortDevice(self,", "result > 0: raise PhidgetException(result) __Parent = Phidget() __Parent.handle =", "getChannel(self): _Channel = ctypes.c_int() __func = PhidgetSupport.getDll().Phidget_getChannel __func.restype = ctypes.c_int32", "return _MaxHubPortSpeed.value def getHubPortSupportsSetSpeed(self): _HubPortSupportsSetSpeed = ctypes.c_int() __func = PhidgetSupport.getDll().Phidget_getHubPortSupportsSetSpeed", "return _HubPortSpeed.value def setHubPortSpeed(self, HubPortSpeed): _HubPortSpeed = ctypes.c_uint32(HubPortSpeed) __func =", "None if sys.platform == 'win32': self._PropertyChangeFactory = ctypes.WINFUNCTYPE(None, ctypes.c_void_p, ctypes.c_void_p,", "res = __func(self.handle, self._onError, None) except RuntimeError: self._Error = None", "ctypes.c_int(IsHubPortDevice) __func = PhidgetSupport.getDll().Phidget_setIsHubPortDevice __func.restype = ctypes.c_int32 result = __func(self.handle,", "PhidgetException(result) return _ChannelName.value.decode('utf-8') def getChannelSubclass(self): _ChannelSubclass = ctypes.c_int() __func =", "else: self._DetachFactory = ctypes.CFUNCTYPE(None, ctypes.c_void_p, ctypes.c_void_p) self._Detach = None self._onDetach", "ctypes.c_int32 res = __func(self.handle, self._onError, None) except RuntimeError: self._Error =", "> 0: raise PhidgetException(result) return _IsLocal.value def setIsLocal(self, IsLocal): _IsLocal", "= PhidgetSupport.getDll().Phidget_getLibraryVersion __func.restype = ctypes.c_int32 result = __func(ctypes.byref(_LibraryVersion)) if result", "= ctypes.c_int32 result = __func(ctypes.byref(_LibraryVersion)) if result > 0: raise", "result = __func(self.handle, _IsRemote) if result > 0: raise PhidgetException(result)", "self._onAttach, None) except RuntimeError: self._Attach = None self._onAttach = None", "result = __func(self.handle, ctypes.byref(_IsLocal)) if result > 0: raise PhidgetException(result)", "if result > 0: raise PhidgetException(result) return _DeviceSerialNumber.value def setDeviceSerialNumber(self,", "> 0: raise PhidgetException(result) return _ChannelSubclass.value def close(self): __func =", "_HubPortSpeed.value def setHubPortSpeed(self, HubPortSpeed): _HubPortSpeed = ctypes.c_uint32(HubPortSpeed) __func = PhidgetSupport.getDll().Phidget_setHubPortSpeed", "result = __func(self.handle, ctypes.byref(_ServerName)) if result > 0: raise PhidgetException(result)", "result = __func(self.handle, ctypes.byref(_value), _valueLen) return _value.value.decode('utf- 8') def __del__(self):", "ctypes.byref(_DeviceClass)) if result > 0: raise PhidgetException(result) return _DeviceClass.value def", "__func = PhidgetSupport.getDll().Phidget_getHub __func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_Hub))", "return _DeviceClass.value def getDeviceClassName(self): _DeviceClassName = ctypes.c_char_p() __func = PhidgetSupport.getDll().Phidget_getDeviceClassName", "raise PhidgetException(result) ANY_SERIAL_NUMBER = -1 ANY_HUB_PORT = -1 ANY_CHANNEL =", "__func = PhidgetSupport.getDll().Phidget_getChannelSubclass __func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_ChannelSubclass))", "def __init__(self): self.handle = ctypes.c_void_p() if sys.platform == 'win32': self._AttachFactory", "result > 0: raise 
PhidgetException(result) return _Channel.value def setChannel(self, Channel):", "ctypes.c_int() __func = PhidgetSupport.getDll().Phidget_getHubPortSupportsSetSpeed __func.restype = ctypes.c_int32 result = __func(self.handle,", "def _localAttachEvent(self, handle, userPtr): if self._Attach == None: return self._Attach(self)", "= ctypes.c_char_p() __func = PhidgetSupport.getDll().Phidget_getDeviceClassName __func.restype = ctypes.c_int32 result =", "_MaxVINTDeviceSpeed = ctypes.c_uint32() __func = PhidgetSupport.getDll().Phidget_getMaxVINTDeviceSpeed __func.restype = ctypes.c_int32 result", "ctypes.byref(_count)) if result > 0: raise PhidgetException(result) return _count.value def", "self._onAttach = None if sys.platform == 'win32': self._DetachFactory = ctypes.WINFUNCTYPE(None,", "== None: return propertyName = propertyName.decode('utf-8') self._PropertyChange(self, propertyName) def setOnPropertyChangeHandler(self,", "= __func(self.handle, ctypes.byref(_MaxVINTDeviceSpeed)) if result > 0: raise PhidgetException(result) return", "result > 0: raise PhidgetException(result) ANY_SERIAL_NUMBER = -1 ANY_HUB_PORT =", "ctypes.c_char_p() __func = PhidgetSupport.getDll().Phidget_getLibraryVersion __func.restype = ctypes.c_int32 result = __func(ctypes.byref(_LibraryVersion))", "= PhidgetSupport.getDll().Phidget_getAttached __func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_Attached)) if", "= __func(self.handle, ctypes.byref(_Parent)) if result > 0: raise PhidgetException(result) __Parent", "return _HubPort.value def setHubPort(self, HubPort): _HubPort = ctypes.c_int(HubPort) __func =", "def getDeviceName(self): _DeviceName = ctypes.c_char_p() __func = PhidgetSupport.getDll().Phidget_getDeviceName __func.restype =", "= __func(self.handle, ctypes.byref(_IsRemote)) if result > 0: raise PhidgetException(result) return", "_DeviceSerialNumber = ctypes.c_int32() __func = PhidgetSupport.getDll().Phidget_getDeviceSerialNumber __func.restype = ctypes.c_int32 result", "return self._Detach(self) def setOnDetachHandler(self, handler): if handler == None: self._Detach", "PhidgetSupport.getDll().Phidget_getServerName __func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_ServerName)) if result", "result > 0: raise PhidgetException(result) def getChannelClass(self): _ChannelClass = ctypes.c_int()", "ctypes.c_int() __func = PhidgetSupport.getDll().Phidget_getIsChannel __func.restype = ctypes.c_int32 result = __func(self.handle,", "self.getIsChannel(): __func = PhidgetSupport.getDll().channelInfo else: __func = PhidgetSupport.getDll().deviceInfo result =", "result = __func(self.handle, ctypes.byref(_ChannelSubclass)) if result > 0: raise PhidgetException(result)", "ctypes.c_void_p, ctypes.c_char_p) else: self._PropertyChangeFactory = ctypes.CFUNCTYPE(None, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_char_p) self._PropertyChange", "PhidgetException(result) return _ChannelSubclass.value def close(self): __func = PhidgetSupport.getDll().Phidget_close __func.restype =", "__func(self.handle, _IsHubPortDevice) if result > 0: raise PhidgetException(result) def getIsLocal(self):", "PhidgetException(result) @staticmethod def getLibraryVersion(): _LibraryVersion = ctypes.c_char_p() __func = PhidgetSupport.getDll().Phidget_getLibraryVersion", "ctypes.c_int() __func = PhidgetSupport.getDll().Phidget_getDeviceVersion __func.restype = ctypes.c_int32 result = __func(self.handle,", "getHubPortCount(self): _HubPortCount = ctypes.c_int() __func = 
PhidgetSupport.getDll().Phidget_getHubPortCount __func.restype = ctypes.c_int32", "0: raise PhidgetException(result) def getDeviceName(self): _DeviceName = ctypes.c_char_p() __func =", "__func = PhidgetSupport.getDll().Phidget_openWaitForAttachment __func.restype = ctypes.c_int32 result = __func(self.handle, _timeout)", "if self._Detach == None: return self._Detach(self) def setOnDetachHandler(self, handler): if", "0: raise PhidgetException(result) def getIsLocal(self): _IsLocal = ctypes.c_int() __func =", "self._onPropertyChange, None) except RuntimeError: self._PropertyChange = None self._onPropertyChange = None", "> 0: raise PhidgetException(result) def open(self): __func = PhidgetSupport.getDll().Phidget_open __func.restype", "_MaxVINTDeviceSpeed.value def getVINTDeviceSupportsSetSpeed(self): _VINTDeviceSupportsSetSpeed = ctypes.c_int() __func = PhidgetSupport.getDll().Phidget_getVINTDeviceSupportsSetSpeed __func.restype", "= PhidgetSupport.getDll().Phidget_getChannelClass __func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_ChannelClass)) if", "0: raise PhidgetException(result) return _DeviceClass.value def getDeviceClassName(self): _DeviceClassName = ctypes.c_char_p()", "raise PhidgetException(result) def getParent(self): _Parent = ctypes.c_void_p() __func = PhidgetSupport.getDll().Phidget_getParent", "if result > 0: raise PhidgetException(result) return _Channel.value def setChannel(self,", "def getVINTDeviceSupportsSetSpeed(self): _VINTDeviceSupportsSetSpeed = ctypes.c_int() __func = PhidgetSupport.getDll().Phidget_getVINTDeviceSupportsSetSpeed __func.restype =", "result = __func(self.handle, ctypes.byref(_VINTDeviceSupportsSetSpeed)) if result > 0: raise PhidgetException(result)", "0: raise PhidgetException(result) return _DeviceID.value def getDeviceLabel(self): _DeviceLabel = ctypes.c_char_p()", "= PhidgetSupport.getDll().Phidget_setHubPortSpeed __func.restype = ctypes.c_int32 result = __func(self.handle, _HubPortSpeed) if", "result > 0: raise PhidgetException(result) @staticmethod def getLibraryVersion(): _LibraryVersion =", "IsHubPortDevice): _IsHubPortDevice = ctypes.c_int(IsHubPortDevice) __func = PhidgetSupport.getDll().Phidget_setIsHubPortDevice __func.restype = ctypes.c_int32", "__func = PhidgetSupport.getDll().Phidget_getDeviceClass __func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_DeviceClass))", "= ctypes.c_int32 result = __func(self.handle, ctypes.byref(_HubPortSpeed)) if result > 0:", "ctypes.c_int32 result = __func(self.handle, ctypes.byref(_HubPortSpeed)) if result > 0: raise", "__func = PhidgetSupport.getDll().Phidget_getIsLocal __func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_IsLocal))", "ctypes.c_int32 result = __func(self.handle, _IsRemote) if result > 0: raise", "ctypes.c_int32 result = __func(self.handle, _HubPort) if result > 0: raise", "ctypes.c_uint32() __func = PhidgetSupport.getDll().Phidget_getDeviceChannelCount __func.restype = ctypes.c_int32 result = __func(self.handle,", "_count = ctypes.c_uint32() __func = PhidgetSupport.getDll().Phidget_getDeviceChannelCount __func.restype = ctypes.c_int32 result", "raise PhidgetException(result) __Parent = Phidget() __Parent.handle = _Parent return __Parent", "def setChannel(self, Channel): _Channel = ctypes.c_int(Channel) __func = PhidgetSupport.getDll().Phidget_setChannel __func.restype", "_Hub = ctypes.c_void_p() __func = PhidgetSupport.getDll().Phidget_getHub __func.restype = ctypes.c_int32 result", "= 
PhidgetSupport.getDll().Phidget_finalize __func.restype = ctypes.c_int32 result = __func(_flags) if result", "if result > 0: raise PhidgetException(result) def getDeviceChannelCount(self, cls): _cls", "ctypes.c_void_p) self._Attach = None self._onAttach = None if sys.platform ==", "PhidgetSupport.getDll().Phidget_getMaxHubPortSpeed __func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_MaxHubPortSpeed)) if result", "Code, Description): if self._Error == None: return Description = Description.decode('utf-8')", "= ctypes.c_uint32() __func = PhidgetSupport.getDll().Phidget_getHubPortSpeed __func.restype = ctypes.c_int32 result =", "def getDeviceVersion(self): _DeviceVersion = ctypes.c_int() __func = PhidgetSupport.getDll().Phidget_getDeviceVersion __func.restype =", "__func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_DeviceClass)) if result >", "def setDeviceSerialNumber(self, DeviceSerialNumber): _DeviceSerialNumber = ctypes.c_int32(DeviceSerialNumber) __func = PhidgetSupport.getDll().Phidget_setDeviceSerialNumber __func.restype", "None def _localPropertyChangeEvent(self, handle, userPtr, propertyName): if self._PropertyChange == None:", "None self._onAttach = None def _localDetachEvent(self, handle, userPtr): if self._Detach", "__func(self.handle, ctypes.byref(_value), _valueLen) return _value.value.decode('utf- 8') def __del__(self): __func =", "ctypes.c_int32 result = __func(self.handle, ctypes.byref(_DeviceID)) if result > 0: raise", "ctypes.c_uint32() __func = PhidgetSupport.getDll().Phidget_getHubPortSpeed __func.restype = ctypes.c_int32 result = __func(self.handle,", "None else: self._Attach = handler self._onAttach = self._AttachFactory(self._localAttachEvent) try: __func", "= ctypes.c_int32 result = __func(self.handle, _Channel) if result > 0:", "raise PhidgetException(result) return _HubPortSpeed.value def setHubPortSpeed(self, HubPortSpeed): _HubPortSpeed = ctypes.c_uint32(HubPortSpeed)", "_Channel = ctypes.c_int() __func = PhidgetSupport.getDll().Phidget_getChannel __func.restype = ctypes.c_int32 result", "if result > 0: raise PhidgetException(result) return _DeviceID.value def getDeviceLabel(self):", "ctypes.c_char_p() __func = PhidgetSupport.getDll().Phidget_getLibraryVersionNumber __func.restype = ctypes.c_int32 result = __func(ctypes.byref(_LibraryVersionNumber))", "import ChannelClass from Phidget22.ChannelSubclass import ChannelSubclass from Phidget22.DeviceClass import DeviceClass", "_IsLocal) if result > 0: raise PhidgetException(result) def getIsRemote(self): _IsRemote", "result > 0: raise PhidgetException(result) return _IsRemote.value def setIsRemote(self, IsRemote):", "_ChannelClassName.value.decode('utf-8') def getChannelName(self): _ChannelName = ctypes.c_char_p() __func = PhidgetSupport.getDll().Phidget_getChannelName __func.restype", "if result > 0: raise PhidgetException(result) return _DeviceVersion.value def getHub(self):", "def __hash__(self): return self.handle.value def __str__(self): _value = (ctypes.c_char *", "self._PropertyChange = None self._onPropertyChange = None else: self._PropertyChange = handler", "ctypes.c_char_p() __func = PhidgetSupport.getDll().Phidget_getChannelClassName __func.restype = ctypes.c_int32 result = __func(self.handle,", "= ctypes.c_int32 result = __func(self.handle, ctypes.byref(_DeviceSKU)) if result > 0:", "propertyName.decode('utf-8') self._PropertyChange(self, propertyName) def setOnPropertyChangeHandler(self, handler): if handler == None:", "= None self._onError = None def 
_localPropertyChangeEvent(self, handle, userPtr, propertyName):", "@staticmethod def resetLibrary(): __func = PhidgetSupport.getDll().Phidget_resetLibrary __func.restype = ctypes.c_int32 result", "ctypes.c_int() __func = PhidgetSupport.getDll().Phidget_getIsHubPortDevice __func.restype = ctypes.c_int32 result = __func(self.handle,", "ctypes.byref(_ServerHostname)) if result > 0: raise PhidgetException(result) return _ServerHostname.value.decode('utf-8') def", "0: raise PhidgetException(result) return _HubPort.value def setHubPort(self, HubPort): _HubPort =", "= None if sys.platform == 'win32': self._PropertyChangeFactory = ctypes.WINFUNCTYPE(None, ctypes.c_void_p,", "return _IsHubPortDevice.value def setIsHubPortDevice(self, IsHubPortDevice): _IsHubPortDevice = ctypes.c_int(IsHubPortDevice) __func =", "= ctypes.CFUNCTYPE(None, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_char_p) self._PropertyChange = None self._onPropertyChange =", "ctypes.byref(_Attached)) if result > 0: raise PhidgetException(result) return _Attached.value def", "_deviceLabel = ctypes.create_string_buffer(deviceLabel.encode('utf-8')) __func = PhidgetSupport.getDll().Phidget_writeDeviceLabel __func.restype = ctypes.c_int32 result", "= PhidgetSupport.getDll().Phidget_setDeviceLabel __func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_DeviceLabel)) if", "= ctypes.CFUNCTYPE(None, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int, ctypes.c_char_p) self._Error = None self._onError", "> 0: raise PhidgetException(result) return _LibraryVersion.value.decode('utf-8') @staticmethod def getLibraryVersionNumber(): _LibraryVersionNumber", "except RuntimeError: self._PropertyChange = None self._onPropertyChange = None @staticmethod def", "= PhidgetSupport.getDll().Phidget_getIsRemote __func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_IsRemote)) if", "= PhidgetSupport.getDll().Phidget_setIsRemote __func.restype = ctypes.c_int32 result = __func(self.handle, _IsRemote) if", "= ctypes.c_int32 result = __func(self.handle, _HubPort) if result > 0:", "PhidgetException(result) return _ServerHostname.value.decode('utf-8') def getServerName(self): _ServerName = ctypes.c_char_p() __func =", "0: raise PhidgetException(result) def openWaitForAttachment(self, timeout): _timeout = ctypes.c_uint32(timeout) __func", "= ctypes.c_char_p() __func = PhidgetSupport.getDll().Phidget_getServerPeerName __func.restype = ctypes.c_int32 result =", "ctypes.c_uint32(timeout) __func = PhidgetSupport.getDll().Phidget_openWaitForAttachment __func.restype = ctypes.c_int32 result = __func(self.handle,", "__func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_ServerPeerName)) if result >", "= ctypes.c_void_p() __func = PhidgetSupport.getDll().Phidget_getHub __func.restype = ctypes.c_int32 result =", "PhidgetException(result) return _DeviceSKU.value.decode('utf-8') def getDeviceVersion(self): _DeviceVersion = ctypes.c_int() __func =", "= ctypes.c_int32 result = __func(self.handle, _cls, ctypes.byref(_count)) if result >", "result = __func(self.handle, _HubPortSpeed) if result > 0: raise PhidgetException(result)", "_value.value.decode('utf- 8') def __del__(self): __func = PhidgetSupport.getDll().Phidget_delete __func.restype = ctypes.c_int32", "PhidgetException(result) return _LibraryVersionNumber.value.decode('utf-8') @staticmethod def resetLibrary(): __func = PhidgetSupport.getDll().Phidget_resetLibrary __func.restype", "= ctypes.c_int32 result = __func(self.handle, _timeout) if result > 0:", "def close(self): 
__func = PhidgetSupport.getDll().Phidget_close __func.restype = ctypes.c_int32 result =", "= propertyName.decode('utf-8') self._PropertyChange(self, propertyName) def setOnPropertyChangeHandler(self, handler): if handler ==", "'win32': self._PropertyChangeFactory = ctypes.WINFUNCTYPE(None, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_char_p) else: self._PropertyChangeFactory =", "handler): if handler == None: self._Detach = None self._onDetach =", "if result > 0: raise PhidgetException(result) return _HubPort.value def setHubPort(self,", "ctypes.create_string_buffer(deviceLabel.encode('utf-8')) __func = PhidgetSupport.getDll().Phidget_writeDeviceLabel __func.restype = ctypes.c_int32 result = __func(self.handle,", "if result > 0: raise PhidgetException(result) def getServerPeerName(self): _ServerPeerName =", "def getMaxHubPortSpeed(self): _MaxHubPortSpeed = ctypes.c_uint32() __func = PhidgetSupport.getDll().Phidget_getMaxHubPortSpeed __func.restype =", "ctypes.byref(_ServerPeerName)) if result > 0: raise PhidgetException(result) return _ServerPeerName.value.decode('utf-8') def", "self._onPropertyChange = None else: self._PropertyChange = handler self._onPropertyChange = self._PropertyChangeFactory(self._localPropertyChangeEvent)", "_valueLen = ctypes.c_int32(65536) if self.getIsChannel(): __func = PhidgetSupport.getDll().channelInfo else: __func", "return _ServerName.value.decode('utf-8') def setServerName(self, ServerName): _ServerName = ctypes.create_string_buffer(ServerName.encode('utf-8')) __func =", "_IsHubPortDevice = ctypes.c_int() __func = PhidgetSupport.getDll().Phidget_getIsHubPortDevice __func.restype = ctypes.c_int32 result", "= ctypes.c_int32 result = __func(self.handle, ctypes.byref(_MaxHubPortSpeed)) if result > 0:", "self._Attach == None: return self._Attach(self) def setOnAttachHandler(self, handler): if handler", "result > 0: raise PhidgetException(result) return _HubPortSpeed.value def setHubPortSpeed(self, HubPortSpeed):", "__func(self.handle, ctypes.byref(_ChannelSubclass)) if result > 0: raise PhidgetException(result) return _ChannelSubclass.value", "__func.restype = ctypes.c_int32 result = __func(self.handle, _IsLocal) if result >", "else: self._PropertyChange = handler self._onPropertyChange = self._PropertyChangeFactory(self._localPropertyChangeEvent) try: __func =", "sys.platform == 'win32': self._PropertyChangeFactory = ctypes.WINFUNCTYPE(None, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_char_p) else:", "__func.restype = ctypes.c_int32 result = __func(self.handle, _cls, ctypes.byref(_count)) if result", "def getDeviceSKU(self): _DeviceSKU = ctypes.c_char_p() __func = PhidgetSupport.getDll().Phidget_getDeviceSKU __func.restype =", "ctypes.byref(_ChannelClass)) if result > 0: raise PhidgetException(result) return _ChannelClass.value def", "PhidgetSupport.getDll().Phidget_getDeviceName __func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_DeviceName)) if result", "self._Error = None self._onError = None def _localPropertyChangeEvent(self, handle, userPtr,", "Phidget22.Async import * from Phidget22.ChannelClass import ChannelClass from Phidget22.ChannelSubclass import", "ctypes.byref(_DeviceLabel)) if result > 0: raise PhidgetException(result) return _DeviceLabel.value.decode('utf-8') def", "= ctypes.c_int32 result = __func(self.handle, _IsRemote) if result > 0:", "= PhidgetSupport.getDll().Phidget_getMaxVINTDeviceSpeed __func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_MaxVINTDeviceSpeed)) if", "result > 0: raise 
PhidgetException(result) return _MaxVINTDeviceSpeed.value def getVINTDeviceSupportsSetSpeed(self): _VINTDeviceSupportsSetSpeed", "sys.platform == 'win32': self._ErrorFactory = ctypes.WINFUNCTYPE(None, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int, ctypes.c_char_p)", "raise PhidgetException(res) def _localAttachEvent(self, handle, userPtr): if self._Attach == None:", "__func = PhidgetSupport.getDll().Phidget_setIsLocal __func.restype = ctypes.c_int32 result = __func(self.handle, _IsLocal)", "if result > 0: raise PhidgetException(result) return _DeviceName.value.decode('utf-8') def getDeviceSerialNumber(self):", "> 0: raise PhidgetException(result) return _ChannelClass.value def getChannelClassName(self): _ChannelClassName =", "__func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_MaxHubPortSpeed)) if result >", "__func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_Parent)) if result >", "ctypes.c_int32 result = __func(self.handle, ctypes.byref(_ServerHostname)) if result > 0: raise", "except RuntimeError: self._Attach = None self._onAttach = None def _localDetachEvent(self,", "ctypes.c_uint32() __func = PhidgetSupport.getDll().Phidget_getMaxVINTDeviceSpeed __func.restype = ctypes.c_int32 result = __func(self.handle,", "None @staticmethod def finalize(flags): _flags = ctypes.c_int32(flags) __func = PhidgetSupport.getDll().Phidget_finalize", "= PhidgetSupport.getDll().Phidget_setServerName __func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_ServerName)) if", "> 0: raise PhidgetException(result) return _ServerUniqueName.value.decode('utf-8') def getMaxVINTDeviceSpeed(self): _MaxVINTDeviceSpeed =", "_IsChannel = ctypes.c_int() __func = PhidgetSupport.getDll().Phidget_getIsChannel __func.restype = ctypes.c_int32 result", "_IsLocal.value def setIsLocal(self, IsLocal): _IsLocal = ctypes.c_int(IsLocal) __func = PhidgetSupport.getDll().Phidget_setIsLocal", "__func = PhidgetSupport.getDll().Phidget_getDeviceSKU __func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_DeviceSKU))", "__func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_ChannelSubclass)) if result >", "__func.restype = ctypes.c_int32 res = __func(self.handle, self._onError, None) except RuntimeError:", "ctypes.c_void_p, ctypes.c_int, ctypes.c_char_p) self._Error = None self._onError = None if", "if result > 0: raise PhidgetException(result) __Hub = Phidget() __Hub.handle", "__func(self.handle, self._onDetach, None) except RuntimeError: self._Detach = None self._onDetach =", "_HubPortSupportsSetSpeed = ctypes.c_int() __func = PhidgetSupport.getDll().Phidget_getHubPortSupportsSetSpeed __func.restype = ctypes.c_int32 result", "__func.restype = ctypes.c_int32 result = __func(ctypes.byref(_LibraryVersion)) if result > 0:", "ctypes.c_int32 result = __func(self.handle, ctypes.byref(_ChannelSubclass)) if result > 0: raise", "0: raise PhidgetException(result) return _ServerUniqueName.value.decode('utf-8') def getMaxVINTDeviceSpeed(self): _MaxVINTDeviceSpeed = ctypes.c_uint32()", "ctypes.c_int32 result = __func(self.handle, _Channel) if result > 0: raise", "= None self._onDetach = None if sys.platform == 'win32': self._ErrorFactory", "= None else: self._Attach = handler self._onAttach = self._AttachFactory(self._localAttachEvent) try:", "ctypes from Phidget22.PhidgetSupport import PhidgetSupport from Phidget22.Async import * from", "__func.restype = ctypes.c_int32 res = __func(self.handle, self._onDetach, None) except RuntimeError:", "= 
__func(self.handle, ctypes.byref(_ServerHostname)) if result > 0: raise PhidgetException(result) return", "PhidgetException(result) return _ServerName.value.decode('utf-8') def setServerName(self, ServerName): _ServerName = ctypes.create_string_buffer(ServerName.encode('utf-8')) __func", "self._PropertyChangeFactory(self._localPropertyChangeEvent) try: __func = PhidgetSupport.getDll().Phidget_setOnPropertyChangeHandler __func.restype = ctypes.c_int32 res =", "PhidgetException(result) def open(self): __func = PhidgetSupport.getDll().Phidget_open __func.restype = ctypes.c_int32 result", "from Phidget22.Async import * from Phidget22.ChannelClass import ChannelClass from Phidget22.ChannelSubclass", "__func = PhidgetSupport.getDll().Phidget_getServerHostname __func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_ServerHostname))", "_IsLocal = ctypes.c_int() __func = PhidgetSupport.getDll().Phidget_getIsLocal __func.restype = ctypes.c_int32 result", "self._onError = None else: self._Error = handler self._onError = self._ErrorFactory(self._localErrorEvent)", "__func(self.handle, ctypes.byref(_DeviceVersion)) if result > 0: raise PhidgetException(result) return _DeviceVersion.value", "result > 0: raise PhidgetException(result) def getIsRemote(self): _IsRemote = ctypes.c_int()", "ChannelSubclass from Phidget22.DeviceClass import DeviceClass from Phidget22.DeviceID import DeviceID from", "result > 0: raise PhidgetException(result) return _IsChannel.value def getIsHubPortDevice(self): _IsHubPortDevice", "ctypes.WINFUNCTYPE(None, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_char_p) else: self._PropertyChangeFactory = ctypes.CFUNCTYPE(None, ctypes.c_void_p, ctypes.c_void_p,", "> 0: raise PhidgetException(result) return _DeviceID.value def getDeviceLabel(self): _DeviceLabel =", "RuntimeError: self._Detach = None self._onDetach = None def _localErrorEvent(self, handle,", "ctypes.c_int32 result = __func(self.handle, _timeout) if result > 0: raise", "DeviceLabel): _DeviceLabel = ctypes.create_string_buffer(DeviceLabel.encode('utf-8')) __func = PhidgetSupport.getDll().Phidget_setDeviceLabel __func.restype = ctypes.c_int32", "= PhidgetSupport.getDll().Phidget_getHubPortSpeed __func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_HubPortSpeed)) if", "self._Detach = handler self._onDetach = self._DetachFactory(self._localDetachEvent) try: __func = PhidgetSupport.getDll().Phidget_setOnDetachHandler", "from Phidget22.ChannelClass import ChannelClass from Phidget22.ChannelSubclass import ChannelSubclass from Phidget22.DeviceClass", "return _DeviceClassName.value.decode('utf-8') def getDeviceID(self): _DeviceID = ctypes.c_int() __func = PhidgetSupport.getDll().Phidget_getDeviceID", "ctypes.c_int32 result = __func() if result > 0: raise PhidgetException(result)", "= PhidgetSupport.getDll().Phidget_getDeviceSKU __func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_DeviceSKU)) if", "= None if sys.platform == 'win32': self._ErrorFactory = ctypes.WINFUNCTYPE(None, ctypes.c_void_p,", "raise PhidgetException(result) def getIsLocal(self): _IsLocal = ctypes.c_int() __func = PhidgetSupport.getDll().Phidget_getIsLocal", "result > 0: raise PhidgetException(result) return _ServerName.value.decode('utf-8') def setServerName(self, ServerName):", "except RuntimeError: self._Detach = None self._onDetach = None def _localErrorEvent(self,", "= Phidget() __Hub.handle = _Hub return __Hub def getHubPort(self): _HubPort", "result > 0: raise PhidgetException(result) def 
getMaxHubPortSpeed(self): _MaxHubPortSpeed = ctypes.c_uint32()", "0: raise PhidgetException(result) return _LibraryVersion.value.decode('utf-8') @staticmethod def getLibraryVersionNumber(): _LibraryVersionNumber =", "__func(self.handle, _cls, ctypes.byref(_count)) if result > 0: raise PhidgetException(result) return", "_ServerName = ctypes.create_string_buffer(ServerName.encode('utf-8')) __func = PhidgetSupport.getDll().Phidget_setServerName __func.restype = ctypes.c_int32 result", "result = __func(self.handle, ctypes.byref(_DeviceClassName)) if result > 0: raise PhidgetException(result)", "= __func(self.handle, _IsHubPortDevice) if result > 0: raise PhidgetException(result) def", "getHubPortSupportsSetSpeed(self): _HubPortSupportsSetSpeed = ctypes.c_int() __func = PhidgetSupport.getDll().Phidget_getHubPortSupportsSetSpeed __func.restype = ctypes.c_int32", "result > 0: raise PhidgetException(result) __Hub = Phidget() __Hub.handle =", "__func = PhidgetSupport.getDll().Phidget_writeDeviceLabel __func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_deviceLabel))", "PhidgetSupport.getDll().Phidget_setDeviceLabel __func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_DeviceLabel)) if result", "if result > 0: raise PhidgetException(result) return _DeviceClass.value def getDeviceClassName(self):", "result > 0: raise PhidgetException(result) def getParent(self): _Parent = ctypes.c_void_p()", "else: __func = PhidgetSupport.getDll().deviceInfo result = __func(self.handle, ctypes.byref(_value), _valueLen) return", "def setHubPortSpeed(self, HubPortSpeed): _HubPortSpeed = ctypes.c_uint32(HubPortSpeed) __func = PhidgetSupport.getDll().Phidget_setHubPortSpeed __func.restype", "__func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_IsRemote)) if result >", "__func(self.handle, _HubPortSpeed) if result > 0: raise PhidgetException(result) def getMaxHubPortSpeed(self):", "_IsRemote.value def setIsRemote(self, IsRemote): _IsRemote = ctypes.c_int(IsRemote) __func = PhidgetSupport.getDll().Phidget_setIsRemote", "== other.handle.value def __hash__(self): return self.handle.value def __str__(self): _value =", "'win32': self._DetachFactory = ctypes.WINFUNCTYPE(None, ctypes.c_void_p, ctypes.c_void_p) else: self._DetachFactory = ctypes.CFUNCTYPE(None,", "None) except RuntimeError: self._Attach = None self._onAttach = None def", "(ctypes.c_char * 65536)() _valueLen = ctypes.c_int32(65536) if self.getIsChannel(): __func =", "if result > 0: raise PhidgetException(result) return _IsChannel.value def getIsHubPortDevice(self):", "setOnAttachHandler(self, handler): if handler == None: self._Attach = None self._onAttach", "raise PhidgetException(result) return _Attached.value def getChannel(self): _Channel = ctypes.c_int() __func", "= None self._onPropertyChange = None @staticmethod def finalize(flags): _flags =", "__func = PhidgetSupport.getDll().Phidget_getServerPeerName __func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_ServerPeerName))", "__func(ctypes.byref(self.handle)) self.handle = None if res > 0: raise PhidgetException(res)", "@staticmethod def getLibraryVersionNumber(): _LibraryVersionNumber = ctypes.c_char_p() __func = PhidgetSupport.getDll().Phidget_getLibraryVersionNumber __func.restype", "def getDeviceChannelCount(self, cls): _cls = ctypes.c_int(cls) _count = ctypes.c_uint32() __func", "= ctypes.c_int() __func = PhidgetSupport.getDll().Phidget_getIsLocal __func.restype = ctypes.c_int32 result =", "__func.restype = 
ctypes.c_int32 res = __func(self.handle, self._onAttach, None) except RuntimeError:", "getDeviceName(self): _DeviceName = ctypes.c_char_p() __func = PhidgetSupport.getDll().Phidget_getDeviceName __func.restype = ctypes.c_int32", "== 'win32': self._PropertyChangeFactory = ctypes.WINFUNCTYPE(None, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_char_p) else: self._PropertyChangeFactory", "writeDeviceLabel(self, deviceLabel): _deviceLabel = ctypes.create_string_buffer(deviceLabel.encode('utf-8')) __func = PhidgetSupport.getDll().Phidget_writeDeviceLabel __func.restype =", "ctypes.c_char_p() __func = PhidgetSupport.getDll().Phidget_getServerName __func.restype = ctypes.c_int32 result = __func(self.handle,", "== None: return self._Detach(self) def setOnDetachHandler(self, handler): if handler ==", "result = __func(self.handle, _IsHubPortDevice) if result > 0: raise PhidgetException(result)", "if result > 0: raise PhidgetException(result) ANY_SERIAL_NUMBER = -1 ANY_HUB_PORT", "0: raise PhidgetException(result) return _DeviceName.value.decode('utf-8') def getDeviceSerialNumber(self): _DeviceSerialNumber = ctypes.c_int32()", "if result > 0: raise PhidgetException(result) return _DeviceSKU.value.decode('utf-8') def getDeviceVersion(self):", "0: raise PhidgetException(result) return _VINTDeviceSupportsSetSpeed.value def writeDeviceLabel(self, deviceLabel): _deviceLabel =", "__func(self.handle, ctypes.byref(_ServerUniqueName)) if result > 0: raise PhidgetException(result) return _ServerUniqueName.value.decode('utf-8')", "def getMaxVINTDeviceSpeed(self): _MaxVINTDeviceSpeed = ctypes.c_uint32() __func = PhidgetSupport.getDll().Phidget_getMaxVINTDeviceSpeed __func.restype =", "self._PropertyChange = None self._onPropertyChange = None def __eq__(self, other): return", "_MaxHubPortSpeed = ctypes.c_uint32() __func = PhidgetSupport.getDll().Phidget_getMaxHubPortSpeed __func.restype = ctypes.c_int32 result", "result = __func(self.handle, _Channel) if result > 0: raise PhidgetException(result)", "raise PhidgetException(result) return _ChannelName.value.decode('utf-8') def getChannelSubclass(self): _ChannelSubclass = ctypes.c_int() __func", "def getIsChannel(self): _IsChannel = ctypes.c_int() __func = PhidgetSupport.getDll().Phidget_getIsChannel __func.restype =", "__func.restype = ctypes.c_int32 result = __func(self.handle, _IsHubPortDevice) if result >", "ctypes.c_int32 result = __func(self.handle, _DeviceSerialNumber) if result > 0: raise", "= PhidgetSupport.getDll().Phidget_delete __func.restype = ctypes.c_int32 res = __func(ctypes.byref(self.handle)) self.handle =", "PhidgetException(result) return _ServerPeerName.value.decode('utf-8') def getServerUniqueName(self): _ServerUniqueName = ctypes.c_char_p() __func =", "= ctypes.c_char_p() __func = PhidgetSupport.getDll().Phidget_getDeviceSKU __func.restype = ctypes.c_int32 result =", "import DeviceID from Phidget22.ErrorEventCode import ErrorEventCode from Phidget22.PhidgetException import PhidgetException", "result = __func(self.handle, ctypes.byref(_DeviceSKU)) if result > 0: raise PhidgetException(result)", "__func = PhidgetSupport.getDll().Phidget_setServerName __func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_ServerName))", "self._onAttach = None def _localDetachEvent(self, handle, userPtr): if self._Detach ==", "result = __func() if result > 0: raise PhidgetException(result) def", "result = __func(self.handle, ctypes.byref(_IsRemote)) if result > 0: raise PhidgetException(result)", "else: self._PropertyChangeFactory = 
ctypes.CFUNCTYPE(None, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_char_p) self._PropertyChange = None", "__func(self.handle, ctypes.byref(_HubPortSupportsSetSpeed)) if result > 0: raise PhidgetException(result) return _HubPortSupportsSetSpeed.value", "_VINTDeviceSupportsSetSpeed = ctypes.c_int() __func = PhidgetSupport.getDll().Phidget_getVINTDeviceSupportsSetSpeed __func.restype = ctypes.c_int32 result", "result = __func(self.handle, ctypes.byref(_DeviceID)) if result > 0: raise PhidgetException(result)", "setDeviceLabel(self, DeviceLabel): _DeviceLabel = ctypes.create_string_buffer(DeviceLabel.encode('utf-8')) __func = PhidgetSupport.getDll().Phidget_setDeviceLabel __func.restype =", "_ServerPeerName.value.decode('utf-8') def getServerUniqueName(self): _ServerUniqueName = ctypes.c_char_p() __func = PhidgetSupport.getDll().Phidget_getServerUniqueName __func.restype", "= __func(self.handle, ctypes.byref(_IsHubPortDevice)) if result > 0: raise PhidgetException(result) return", "= ctypes.c_int() __func = PhidgetSupport.getDll().Phidget_getHubPortCount __func.restype = ctypes.c_int32 result =", "= -1 ANY_CHANNEL = -1 ANY_LABEL = None INFINITE_TIMEOUT =", "def getDeviceID(self): _DeviceID = ctypes.c_int() __func = PhidgetSupport.getDll().Phidget_getDeviceID __func.restype =", "ctypes.byref(_HubPortSpeed)) if result > 0: raise PhidgetException(result) return _HubPortSpeed.value def", "= PhidgetSupport.getDll().Phidget_setDeviceSerialNumber __func.restype = ctypes.c_int32 result = __func(self.handle, _DeviceSerialNumber) if", "ctypes.c_void_p() if sys.platform == 'win32': self._AttachFactory = ctypes.WINFUNCTYPE(None, ctypes.c_void_p, ctypes.c_void_p)", "PhidgetException(result) def getIsLocal(self): _IsLocal = ctypes.c_int() __func = PhidgetSupport.getDll().Phidget_getIsLocal __func.restype", "import DeviceClass from Phidget22.DeviceID import DeviceID from Phidget22.ErrorEventCode import ErrorEventCode", "__func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_HubPortSupportsSetSpeed)) if result >", "PhidgetSupport.getDll().Phidget_setOnErrorHandler __func.restype = ctypes.c_int32 res = __func(self.handle, self._onError, None) except", "return _DeviceLabel.value.decode('utf-8') def setDeviceLabel(self, DeviceLabel): _DeviceLabel = ctypes.create_string_buffer(DeviceLabel.encode('utf-8')) __func =", "= None self._onPropertyChange = None else: self._PropertyChange = handler self._onPropertyChange", "return _DeviceSerialNumber.value def setDeviceSerialNumber(self, DeviceSerialNumber): _DeviceSerialNumber = ctypes.c_int32(DeviceSerialNumber) __func =", "= ctypes.create_string_buffer(ServerName.encode('utf-8')) __func = PhidgetSupport.getDll().Phidget_setServerName __func.restype = ctypes.c_int32 result =", "self._Attach = handler self._onAttach = self._AttachFactory(self._localAttachEvent) try: __func = PhidgetSupport.getDll().Phidget_setOnAttachHandler", "__func.restype = ctypes.c_int32 res = __func(self.handle, self._onPropertyChange, None) except RuntimeError:", "ctypes.c_char_p) self._Error = None self._onError = None if sys.platform ==", "= ctypes.c_int() __func = PhidgetSupport.getDll().Phidget_getDeviceID __func.restype = ctypes.c_int32 result =", "__func.restype = ctypes.c_int32 result = __func(self.handle, _timeout) if result >", "= ctypes.c_int32 result = __func(self.handle, ctypes.byref(_DeviceSerialNumber)) if result > 0:", "result = __func(self.handle, ctypes.byref(_DeviceVersion)) if result > 0: raise PhidgetException(result)", "ctypes.c_char_p() __func = 
PhidgetSupport.getDll().Phidget_getDeviceLabel __func.restype = ctypes.c_int32 result = __func(self.handle,", "> 0: raise PhidgetException(result) return _IsChannel.value def getIsHubPortDevice(self): _IsHubPortDevice =", "__func(self.handle) if result > 0: raise PhidgetException(result) def getDeviceChannelCount(self, cls):", "getIsHubPortDevice(self): _IsHubPortDevice = ctypes.c_int() __func = PhidgetSupport.getDll().Phidget_getIsHubPortDevice __func.restype = ctypes.c_int32", "from Phidget22.ErrorEventCode import ErrorEventCode from Phidget22.PhidgetException import PhidgetException class Phidget:", "handle, userPtr): if self._Detach == None: return self._Detach(self) def setOnDetachHandler(self,", "handler self._onDetach = self._DetachFactory(self._localDetachEvent) try: __func = PhidgetSupport.getDll().Phidget_setOnDetachHandler __func.restype =", "raise PhidgetException(result) return _LibraryVersion.value.decode('utf-8') @staticmethod def getLibraryVersionNumber(): _LibraryVersionNumber = ctypes.c_char_p()", "0: raise PhidgetException(result) def getMaxHubPortSpeed(self): _MaxHubPortSpeed = ctypes.c_uint32() __func =", "self.handle = ctypes.c_void_p() if sys.platform == 'win32': self._AttachFactory = ctypes.WINFUNCTYPE(None,", "> 0: raise PhidgetException(result) return _DeviceSKU.value.decode('utf-8') def getDeviceVersion(self): _DeviceVersion =", "_DeviceName = ctypes.c_char_p() __func = PhidgetSupport.getDll().Phidget_getDeviceName __func.restype = ctypes.c_int32 result", "if result > 0: raise PhidgetException(result) __Parent = Phidget() __Parent.handle", "def setIsRemote(self, IsRemote): _IsRemote = ctypes.c_int(IsRemote) __func = PhidgetSupport.getDll().Phidget_setIsRemote __func.restype", "ctypes.c_int32 result = __func(self.handle, _HubPortSpeed) if result > 0: raise", "self._DetachFactory(self._localDetachEvent) try: __func = PhidgetSupport.getDll().Phidget_setOnDetachHandler __func.restype = ctypes.c_int32 res =", "__func = PhidgetSupport.getDll().Phidget_setOnAttachHandler __func.restype = ctypes.c_int32 res = __func(self.handle, self._onAttach,", "def getIsHubPortDevice(self): _IsHubPortDevice = ctypes.c_int() __func = PhidgetSupport.getDll().Phidget_getIsHubPortDevice __func.restype =", "getMaxVINTDeviceSpeed(self): _MaxVINTDeviceSpeed = ctypes.c_uint32() __func = PhidgetSupport.getDll().Phidget_getMaxVINTDeviceSpeed __func.restype = ctypes.c_int32", "self._ErrorFactory = ctypes.CFUNCTYPE(None, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int, ctypes.c_char_p) self._Error = None", "result > 0: raise PhidgetException(result) return _Attached.value def getChannel(self): _Channel", "8') def __del__(self): __func = PhidgetSupport.getDll().Phidget_delete __func.restype = ctypes.c_int32 res", "__func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_HubPort)) if result >", "self._onError, None) except RuntimeError: self._Error = None self._onError = None", "_HubPortSpeed = ctypes.c_uint32() __func = PhidgetSupport.getDll().Phidget_getHubPortSpeed __func.restype = ctypes.c_int32 result", "self._Detach = None self._onDetach = None if sys.platform == 'win32':", "ctypes.c_int32 result = __func(self.handle, ctypes.byref(_HubPortSupportsSetSpeed)) if result > 0: raise", "ctypes.c_int32 result = __func(self.handle, ctypes.byref(_DeviceSerialNumber)) if result > 0: raise", "_value = (ctypes.c_char * 65536)() _valueLen = ctypes.c_int32(65536) if self.getIsChannel():", "ctypes.c_int32 res = __func(self.handle, self._onPropertyChange, None) except RuntimeError: 
self._PropertyChange =", "__func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_HubPortSpeed)) if result >", "= ctypes.c_int32 result = __func(self.handle, ctypes.byref(_IsChannel)) if result > 0:", "PhidgetSupport.getDll().Phidget_getChannel __func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_Channel)) if result", "ChannelClass from Phidget22.ChannelSubclass import ChannelSubclass from Phidget22.DeviceClass import DeviceClass from", "result > 0: raise PhidgetException(result) return _IsHubPortDevice.value def setIsHubPortDevice(self, IsHubPortDevice):", "return _ChannelSubclass.value def close(self): __func = PhidgetSupport.getDll().Phidget_close __func.restype = ctypes.c_int32", "ctypes.c_char_p() __func = PhidgetSupport.getDll().Phidget_getServerHostname __func.restype = ctypes.c_int32 result = __func(self.handle,", "self._PropertyChange = None self._onPropertyChange = None @staticmethod def finalize(flags): _flags", "result = __func(self.handle, _cls, ctypes.byref(_count)) if result > 0: raise", "getIsChannel(self): _IsChannel = ctypes.c_int() __func = PhidgetSupport.getDll().Phidget_getIsChannel __func.restype = ctypes.c_int32", "_DeviceName.value.decode('utf-8') def getDeviceSerialNumber(self): _DeviceSerialNumber = ctypes.c_int32() __func = PhidgetSupport.getDll().Phidget_getDeviceSerialNumber __func.restype", "__func = PhidgetSupport.getDll().Phidget_getChannel __func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_Channel))", "= __func(ctypes.byref(_LibraryVersion)) if result > 0: raise PhidgetException(result) return _LibraryVersion.value.decode('utf-8')", "raise PhidgetException(result) return _DeviceSKU.value.decode('utf-8') def getDeviceVersion(self): _DeviceVersion = ctypes.c_int() __func", "= ctypes.c_int32 result = __func(self.handle, _HubPortSpeed) if result > 0:", "ctypes.byref(_HubPortCount)) if result > 0: raise PhidgetException(result) return _HubPortCount.value def", "= None else: self._PropertyChange = handler self._onPropertyChange = self._PropertyChangeFactory(self._localPropertyChangeEvent) try:", "= __func(self.handle, _HubPort) if result > 0: raise PhidgetException(result) def", "return _DeviceVersion.value def getHub(self): _Hub = ctypes.c_void_p() __func = PhidgetSupport.getDll().Phidget_getHub", "_HubPortSupportsSetSpeed.value def getIsChannel(self): _IsChannel = ctypes.c_int() __func = PhidgetSupport.getDll().Phidget_getIsChannel __func.restype", "self._DetachFactory = ctypes.CFUNCTYPE(None, ctypes.c_void_p, ctypes.c_void_p) self._Detach = None self._onDetach =", "sys.platform == 'win32': self._AttachFactory = ctypes.WINFUNCTYPE(None, ctypes.c_void_p, ctypes.c_void_p) else: self._AttachFactory", "__func = PhidgetSupport.getDll().Phidget_getDeviceLabel __func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_DeviceLabel))", "PhidgetException(result) return _count.value def getDeviceClass(self): _DeviceClass = ctypes.c_int() __func =", "_timeout) if result > 0: raise PhidgetException(result) def getParent(self): _Parent", "-1 ANY_HUB_PORT = -1 ANY_CHANNEL = -1 ANY_LABEL = None", "return __Hub def getHubPort(self): _HubPort = ctypes.c_int() __func = PhidgetSupport.getDll().Phidget_getHubPort", "None: self._Error = None self._onError = None else: self._Error =", "self._Attach(self) def setOnAttachHandler(self, handler): if handler == None: self._Attach =", "raise PhidgetException(result) def getDeviceChannelCount(self, cls): _cls = ctypes.c_int(cls) _count =", 
"PhidgetException(result) def getMaxHubPortSpeed(self): _MaxHubPortSpeed = ctypes.c_uint32() __func = PhidgetSupport.getDll().Phidget_getMaxHubPortSpeed __func.restype", "= ctypes.c_void_p() __func = PhidgetSupport.getDll().Phidget_getParent __func.restype = ctypes.c_int32 result =", "ctypes.byref(_HubPortSupportsSetSpeed)) if result > 0: raise PhidgetException(result) return _HubPortSupportsSetSpeed.value def", "ctypes.c_int32 result = __func(self.handle, ctypes.byref(_DeviceVersion)) if result > 0: raise", "= PhidgetSupport.getDll().Phidget_getDeviceName __func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_DeviceName)) if", "setDeviceSerialNumber(self, DeviceSerialNumber): _DeviceSerialNumber = ctypes.c_int32(DeviceSerialNumber) __func = PhidgetSupport.getDll().Phidget_setDeviceSerialNumber __func.restype =", "PhidgetException(result) return _HubPort.value def setHubPort(self, HubPort): _HubPort = ctypes.c_int(HubPort) __func", "= self._DetachFactory(self._localDetachEvent) try: __func = PhidgetSupport.getDll().Phidget_setOnDetachHandler __func.restype = ctypes.c_int32 res", "_ChannelClass = ctypes.c_int() __func = PhidgetSupport.getDll().Phidget_getChannelClass __func.restype = ctypes.c_int32 result", "ctypes.byref(_ChannelName)) if result > 0: raise PhidgetException(result) return _ChannelName.value.decode('utf-8') def", "_ChannelSubclass = ctypes.c_int() __func = PhidgetSupport.getDll().Phidget_getChannelSubclass __func.restype = ctypes.c_int32 result", "= __func(self.handle, ctypes.byref(_Attached)) if result > 0: raise PhidgetException(result) return", "= ctypes.c_int32(65536) if self.getIsChannel(): __func = PhidgetSupport.getDll().channelInfo else: __func =", "__func = PhidgetSupport.getDll().Phidget_setHubPortSpeed __func.restype = ctypes.c_int32 result = __func(self.handle, _HubPortSpeed)", "if result > 0: raise PhidgetException(result) return _ServerUniqueName.value.decode('utf-8') def getMaxVINTDeviceSpeed(self):", "result = __func(self.handle, ctypes.byref(_Channel)) if result > 0: raise PhidgetException(result)", "= __func(self.handle, ctypes.byref(_ChannelName)) if result > 0: raise PhidgetException(result) return", "_ServerHostname = ctypes.c_char_p() __func = PhidgetSupport.getDll().Phidget_getServerHostname __func.restype = ctypes.c_int32 result", "ctypes.c_void_p, ctypes.c_void_p) self._Detach = None self._onDetach = None if sys.platform", "= __func(self.handle, ctypes.byref(_Hub)) if result > 0: raise PhidgetException(result) __Hub", "ctypes.c_int32 result = __func(self.handle, ctypes.byref(_DeviceName)) if result > 0: raise", "getHubPortSpeed(self): _HubPortSpeed = ctypes.c_uint32() __func = PhidgetSupport.getDll().Phidget_getHubPortSpeed __func.restype = ctypes.c_int32", "PhidgetSupport.getDll().Phidget_getChannelSubclass __func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_ChannelSubclass)) if result", "> 0: raise PhidgetException(result) def getIsLocal(self): _IsLocal = ctypes.c_int() __func", "raise PhidgetException(result) return _DeviceName.value.decode('utf-8') def getDeviceSerialNumber(self): _DeviceSerialNumber = ctypes.c_int32() __func", "result > 0: raise PhidgetException(result) return _ChannelClassName.value.decode('utf-8') def getChannelName(self): _ChannelName", "= ctypes.c_int32 result = __func(self.handle, ctypes.byref(_HubPortSupportsSetSpeed)) if result > 0:", "ctypes.c_char_p() __func = PhidgetSupport.getDll().Phidget_getDeviceClassName __func.restype = ctypes.c_int32 result = __func(self.handle,", 
"self._onAttach = None else: self._Attach = handler self._onAttach = self._AttachFactory(self._localAttachEvent)", "result = __func(self.handle, _timeout) if result > 0: raise PhidgetException(result)", "None self._onAttach = None if sys.platform == 'win32': self._DetachFactory =", "result > 0: raise PhidgetException(result) return _ServerUniqueName.value.decode('utf-8') def getMaxVINTDeviceSpeed(self): _MaxVINTDeviceSpeed", "__func(_flags) if result > 0: raise PhidgetException(result) @staticmethod def getLibraryVersion():", "0: raise PhidgetException(result) def getServerPeerName(self): _ServerPeerName = ctypes.c_char_p() __func =", "__del__(self): __func = PhidgetSupport.getDll().Phidget_delete __func.restype = ctypes.c_int32 res = __func(ctypes.byref(self.handle))", "= ctypes.c_int32 result = __func() if result > 0: raise", "__func = PhidgetSupport.getDll().Phidget_getChannelName __func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_ChannelName))", "ctypes.byref(_DeviceName)) if result > 0: raise PhidgetException(result) return _DeviceName.value.decode('utf-8') def", "result > 0: raise PhidgetException(result) return _HubPortSupportsSetSpeed.value def getIsChannel(self): _IsChannel", "hasattr(other, 'handle') and self.handle.value == other.handle.value def __hash__(self): return self.handle.value", "__func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_Hub)) if result >", "PhidgetSupport.getDll().Phidget_setIsRemote __func.restype = ctypes.c_int32 result = __func(self.handle, _IsRemote) if result", "= None def _localPropertyChangeEvent(self, handle, userPtr, propertyName): if self._PropertyChange ==", "PhidgetSupport from Phidget22.Async import * from Phidget22.ChannelClass import ChannelClass from", "handler == None: self._Error = None self._onError = None else:", "_DeviceClassName.value.decode('utf-8') def getDeviceID(self): _DeviceID = ctypes.c_int() __func = PhidgetSupport.getDll().Phidget_getDeviceID __func.restype", "ctypes.c_int() __func = PhidgetSupport.getDll().Phidget_getIsLocal __func.restype = ctypes.c_int32 result = __func(self.handle,", "setOnPropertyChangeHandler(self, handler): if handler == None: self._PropertyChange = None self._onPropertyChange", "None else: self._Error = handler self._onError = self._ErrorFactory(self._localErrorEvent) try: __func", "__func = PhidgetSupport.getDll().Phidget_getDeviceName __func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_DeviceName))", "> 0: raise PhidgetException(result) @staticmethod def getLibraryVersion(): _LibraryVersion = ctypes.c_char_p()", "getLibraryVersion(): _LibraryVersion = ctypes.c_char_p() __func = PhidgetSupport.getDll().Phidget_getLibraryVersion __func.restype = ctypes.c_int32", "Phidget22.ErrorEventCode import ErrorEventCode from Phidget22.PhidgetException import PhidgetException class Phidget: def", "if handler == None: self._Detach = None self._onDetach = None", "PhidgetSupport.getDll().Phidget_getChannelName __func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_ChannelName)) if result", "ctypes.byref(_ServerUniqueName)) if result > 0: raise PhidgetException(result) return _ServerUniqueName.value.decode('utf-8') def", "def getServerName(self): _ServerName = ctypes.c_char_p() __func = PhidgetSupport.getDll().Phidget_getServerName __func.restype =", "0: raise PhidgetException(result) return _DeviceLabel.value.decode('utf-8') def setDeviceLabel(self, DeviceLabel): _DeviceLabel =", "'handle') and self.handle.value == 
other.handle.value def __hash__(self): return self.handle.value def", "ctypes.c_int32 result = __func(self.handle, ctypes.byref(_HubPortCount)) if result > 0: raise", "= ctypes.c_int32 res = __func(self.handle, self._onAttach, None) except RuntimeError: self._Attach", "= __func(self.handle, _cls, ctypes.byref(_count)) if result > 0: raise PhidgetException(result)", "= ctypes.c_int() __func = PhidgetSupport.getDll().Phidget_getIsHubPortDevice __func.restype = ctypes.c_int32 result =", "== None: self._Attach = None self._onAttach = None else: self._Attach", "== 'win32': self._DetachFactory = ctypes.WINFUNCTYPE(None, ctypes.c_void_p, ctypes.c_void_p) else: self._DetachFactory =", "__Parent def getServerHostname(self): _ServerHostname = ctypes.c_char_p() __func = PhidgetSupport.getDll().Phidget_getServerHostname __func.restype", "= __func(self.handle, ctypes.byref(_ServerName)) if result > 0: raise PhidgetException(result) def", "_ChannelName = ctypes.c_char_p() __func = PhidgetSupport.getDll().Phidget_getChannelName __func.restype = ctypes.c_int32 result", "__func(ctypes.byref(_LibraryVersionNumber)) if result > 0: raise PhidgetException(result) return _LibraryVersionNumber.value.decode('utf-8') @staticmethod", "= handler self._onAttach = self._AttachFactory(self._localAttachEvent) try: __func = PhidgetSupport.getDll().Phidget_setOnAttachHandler __func.restype", "return _count.value def getDeviceClass(self): _DeviceClass = ctypes.c_int() __func = PhidgetSupport.getDll().Phidget_getDeviceClass", "= __func(self.handle, ctypes.byref(_deviceLabel)) if result > 0: raise PhidgetException(result) ANY_SERIAL_NUMBER", "= ctypes.c_int() __func = PhidgetSupport.getDll().Phidget_getHubPortSupportsSetSpeed __func.restype = ctypes.c_int32 result =", "result > 0: raise PhidgetException(result) return _DeviceName.value.decode('utf-8') def getDeviceSerialNumber(self): _DeviceSerialNumber", "PhidgetException(result) def getDeviceChannelCount(self, cls): _cls = ctypes.c_int(cls) _count = ctypes.c_uint32()", "ctypes.c_uint32(HubPortSpeed) __func = PhidgetSupport.getDll().Phidget_setHubPortSpeed __func.restype = ctypes.c_int32 result = __func(self.handle,", "result > 0: raise PhidgetException(result) return _DeviceSKU.value.decode('utf-8') def getDeviceVersion(self): _DeviceVersion", "PhidgetSupport.getDll().Phidget_getHubPortSupportsSetSpeed __func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_HubPortSupportsSetSpeed)) if result", "= __func(self.handle, _DeviceSerialNumber) if result > 0: raise PhidgetException(result) def", "ctypes.c_int32 res = __func(self.handle, self._onDetach, None) except RuntimeError: self._Detach =", "def __del__(self): __func = PhidgetSupport.getDll().Phidget_delete __func.restype = ctypes.c_int32 res =", "raise PhidgetException(result) return _ChannelClassName.value.decode('utf-8') def getChannelName(self): _ChannelName = ctypes.c_char_p() __func", "__func = PhidgetSupport.getDll().Phidget_getHubPortCount __func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_HubPortCount))", "if result > 0: raise PhidgetException(result) return _IsLocal.value def setIsLocal(self,", "None else: self._PropertyChange = handler self._onPropertyChange = self._PropertyChangeFactory(self._localPropertyChangeEvent) try: __func", "__init__(self): self.handle = ctypes.c_void_p() if sys.platform == 'win32': self._AttachFactory =", "= ctypes.c_uint32() __func = PhidgetSupport.getDll().Phidget_getDeviceChannelCount __func.restype = ctypes.c_int32 result =", "def 
getChannel(self): _Channel = ctypes.c_int() __func = PhidgetSupport.getDll().Phidget_getChannel __func.restype =", "result = __func(self.handle) if result > 0: raise PhidgetException(result) def", "if result > 0: raise PhidgetException(result) return _IsRemote.value def setIsRemote(self,", "_flags = ctypes.c_int32(flags) __func = PhidgetSupport.getDll().Phidget_finalize __func.restype = ctypes.c_int32 result", "ctypes.c_int(cls) _count = ctypes.c_uint32() __func = PhidgetSupport.getDll().Phidget_getDeviceChannelCount __func.restype = ctypes.c_int32", "> 0: raise PhidgetException(result) return _ChannelClassName.value.decode('utf-8') def getChannelName(self): _ChannelName =", "PhidgetException(result) return _DeviceVersion.value def getHub(self): _Hub = ctypes.c_void_p() __func =", "ctypes.byref(_IsChannel)) if result > 0: raise PhidgetException(result) return _IsChannel.value def", "_cls, ctypes.byref(_count)) if result > 0: raise PhidgetException(result) return _count.value", "__func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_DeviceClassName)) if result >", "HubPort): _HubPort = ctypes.c_int(HubPort) __func = PhidgetSupport.getDll().Phidget_setHubPort __func.restype = ctypes.c_int32", "PhidgetException(result) return _VINTDeviceSupportsSetSpeed.value def writeDeviceLabel(self, deviceLabel): _deviceLabel = ctypes.create_string_buffer(deviceLabel.encode('utf-8')) __func", "__func.restype = ctypes.c_int32 result = __func(self.handle, _IsRemote) if result >", "ctypes.c_int32() __func = PhidgetSupport.getDll().Phidget_getDeviceSerialNumber __func.restype = ctypes.c_int32 result = __func(self.handle,", "ctypes.create_string_buffer(DeviceLabel.encode('utf-8')) __func = PhidgetSupport.getDll().Phidget_setDeviceLabel __func.restype = ctypes.c_int32 result = __func(self.handle,", "result > 0: raise PhidgetException(result) def getDeviceChannelCount(self, cls): _cls =", "deviceLabel): _deviceLabel = ctypes.create_string_buffer(deviceLabel.encode('utf-8')) __func = PhidgetSupport.getDll().Phidget_writeDeviceLabel __func.restype = ctypes.c_int32", "raise PhidgetException(result) def getAttached(self): _Attached = ctypes.c_int() __func = PhidgetSupport.getDll().Phidget_getAttached", "__func = PhidgetSupport.getDll().Phidget_getHubPortSpeed __func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_HubPortSpeed))", "_DeviceLabel = ctypes.c_char_p() __func = PhidgetSupport.getDll().Phidget_getDeviceLabel __func.restype = ctypes.c_int32 result", "PhidgetSupport.getDll().Phidget_setIsHubPortDevice __func.restype = ctypes.c_int32 result = __func(self.handle, _IsHubPortDevice) if result", "__func = PhidgetSupport.getDll().Phidget_getAttached __func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_Attached))", "ctypes.c_int32 result = __func(self.handle, ctypes.byref(_HubPort)) if result > 0: raise", "__func(self.handle, _IsLocal) if result > 0: raise PhidgetException(result) def getIsRemote(self):", "__func(self.handle, ctypes.byref(_HubPortSpeed)) if result > 0: raise PhidgetException(result) return _HubPortSpeed.value", "_Channel = ctypes.c_int(Channel) __func = PhidgetSupport.getDll().Phidget_setChannel __func.restype = ctypes.c_int32 result", "__func.restype = ctypes.c_int32 result = __func(self.handle, _HubPortSpeed) if result >", "return _DeviceSKU.value.decode('utf-8') def getDeviceVersion(self): _DeviceVersion = ctypes.c_int() __func = PhidgetSupport.getDll().Phidget_getDeviceVersion", "== 'win32': self._AttachFactory = 
ctypes.WINFUNCTYPE(None, ctypes.c_void_p, ctypes.c_void_p) else: self._AttachFactory =", "ctypes.CFUNCTYPE(None, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_char_p) self._PropertyChange = None self._onPropertyChange = None", "== 'win32': self._ErrorFactory = ctypes.WINFUNCTYPE(None, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int, ctypes.c_char_p) else:", "= ctypes.c_int32 result = __func(self.handle, ctypes.byref(_DeviceID)) if result > 0:", "Phidget: def __init__(self): self.handle = ctypes.c_void_p() if sys.platform == 'win32':", "= ctypes.c_int32 result = __func(self.handle, ctypes.byref(_DeviceName)) if result > 0:", "= ctypes.c_int32 res = __func(self.handle, self._onError, None) except RuntimeError: self._Error", "= PhidgetSupport.getDll().Phidget_getHub __func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_Hub)) if", "= __func(self.handle, ctypes.byref(_ServerName)) if result > 0: raise PhidgetException(result) return", "handler self._onAttach = self._AttachFactory(self._localAttachEvent) try: __func = PhidgetSupport.getDll().Phidget_setOnAttachHandler __func.restype =", "__func(self.handle, ctypes.byref(_IsChannel)) if result > 0: raise PhidgetException(result) return _IsChannel.value", "__func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_ChannelClassName)) if result >", "= ctypes.WINFUNCTYPE(None, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int, ctypes.c_char_p) else: self._ErrorFactory = ctypes.CFUNCTYPE(None,", "ctypes.c_void_p() __func = PhidgetSupport.getDll().Phidget_getParent __func.restype = ctypes.c_int32 result = __func(self.handle,", "= None def __eq__(self, other): return hasattr(other, 'handle') and self.handle.value", "timeout): _timeout = ctypes.c_uint32(timeout) __func = PhidgetSupport.getDll().Phidget_openWaitForAttachment __func.restype = ctypes.c_int32", "= PhidgetSupport.getDll().Phidget_getChannelSubclass __func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_ChannelSubclass)) if", "result = __func(ctypes.byref(_LibraryVersion)) if result > 0: raise PhidgetException(result) return", "0: raise PhidgetException(result) return _IsHubPortDevice.value def setIsHubPortDevice(self, IsHubPortDevice): _IsHubPortDevice =", "PhidgetSupport.getDll().deviceInfo result = __func(self.handle, ctypes.byref(_value), _valueLen) return _value.value.decode('utf- 8') def", "PhidgetException(result) return _ChannelClassName.value.decode('utf-8') def getChannelName(self): _ChannelName = ctypes.c_char_p() __func =", "__func(self.handle, ctypes.byref(_VINTDeviceSupportsSetSpeed)) if result > 0: raise PhidgetException(result) return _VINTDeviceSupportsSetSpeed.value", "__func(self.handle, ctypes.byref(_IsHubPortDevice)) if result > 0: raise PhidgetException(result) return _IsHubPortDevice.value", "= __func(self.handle, self._onError, None) except RuntimeError: self._Error = None self._onError", "result > 0: raise PhidgetException(result) def getHubPortCount(self): _HubPortCount = ctypes.c_int()", "if result > 0: raise PhidgetException(result) def getMaxHubPortSpeed(self): _MaxHubPortSpeed =", "__func(self.handle, _timeout) if result > 0: raise PhidgetException(result) def getParent(self):", "result = __func(self.handle, ctypes.byref(_ServerHostname)) if result > 0: raise PhidgetException(result)", "= ctypes.WINFUNCTYPE(None, ctypes.c_void_p, ctypes.c_void_p) else: self._AttachFactory = ctypes.CFUNCTYPE(None, ctypes.c_void_p, ctypes.c_void_p)", "0: raise PhidgetException(result) @staticmethod def getLibraryVersion(): 
_LibraryVersion = ctypes.c_char_p() __func", "= None self._onAttach = None def _localDetachEvent(self, handle, userPtr): if", "self._onError = None if sys.platform == 'win32': self._PropertyChangeFactory = ctypes.WINFUNCTYPE(None,", "= PhidgetSupport.getDll().Phidget_setHubPort __func.restype = ctypes.c_int32 result = __func(self.handle, _HubPort) if", "> 0: raise PhidgetException(result) ANY_SERIAL_NUMBER = -1 ANY_HUB_PORT = -1", "ctypes.byref(_IsLocal)) if result > 0: raise PhidgetException(result) return _IsLocal.value def", "= ctypes.c_int32 result = __func(self.handle, ctypes.byref(_ChannelSubclass)) if result > 0:", "PhidgetSupport.getDll().Phidget_getIsChannel __func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_IsChannel)) if result", "setIsRemote(self, IsRemote): _IsRemote = ctypes.c_int(IsRemote) __func = PhidgetSupport.getDll().Phidget_setIsRemote __func.restype =", "ctypes.c_char_p() __func = PhidgetSupport.getDll().Phidget_getServerPeerName __func.restype = ctypes.c_int32 result = __func(self.handle,", "result > 0: raise PhidgetException(result) def getServerPeerName(self): _ServerPeerName = ctypes.c_char_p()", "= __func(self.handle, ctypes.byref(_DeviceLabel)) if result > 0: raise PhidgetException(result) def", "result = __func(self.handle, ctypes.byref(_deviceLabel)) if result > 0: raise PhidgetException(result)", "if result > 0: raise PhidgetException(result) return _ChannelName.value.decode('utf-8') def getChannelSubclass(self):", "getDeviceLabel(self): _DeviceLabel = ctypes.c_char_p() __func = PhidgetSupport.getDll().Phidget_getDeviceLabel __func.restype = ctypes.c_int32", "raise PhidgetException(result) return _DeviceID.value def getDeviceLabel(self): _DeviceLabel = ctypes.c_char_p() __func", "ctypes.c_void_p, ctypes.c_void_p) self._Attach = None self._onAttach = None if sys.platform", "= __func(self.handle, ctypes.byref(_ChannelSubclass)) if result > 0: raise PhidgetException(result) return", "result > 0: raise PhidgetException(result) return _LibraryVersion.value.decode('utf-8') @staticmethod def getLibraryVersionNumber():", "__func(self.handle, ctypes.byref(_HubPort)) if result > 0: raise PhidgetException(result) return _HubPort.value", "= ctypes.c_int32 result = __func(self.handle, ctypes.byref(_DeviceClass)) if result > 0:", "PhidgetSupport.getDll().Phidget_setServerName __func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_ServerName)) if result", "= ctypes.c_int() __func = PhidgetSupport.getDll().Phidget_getVINTDeviceSupportsSetSpeed __func.restype = ctypes.c_int32 result =", "= None def _localDetachEvent(self, handle, userPtr): if self._Detach == None:", "= __func(self.handle, ctypes.byref(_Channel)) if result > 0: raise PhidgetException(result) return", "> 0: raise PhidgetException(result) return _DeviceClassName.value.decode('utf-8') def getDeviceID(self): _DeviceID =", "= ctypes.c_int(IsLocal) __func = PhidgetSupport.getDll().Phidget_setIsLocal __func.restype = ctypes.c_int32 result =", "= ctypes.CFUNCTYPE(None, ctypes.c_void_p, ctypes.c_void_p) self._Attach = None self._onAttach = None", "ctypes.c_int() __func = PhidgetSupport.getDll().Phidget_getDeviceClass __func.restype = ctypes.c_int32 result = __func(self.handle,", "if result > 0: raise PhidgetException(result) def getAttached(self): _Attached =", "raise PhidgetException(result) return _IsHubPortDevice.value def setIsHubPortDevice(self, IsHubPortDevice): _IsHubPortDevice = ctypes.c_int(IsHubPortDevice)", "0: raise PhidgetException(result) return 
_DeviceSerialNumber.value def setDeviceSerialNumber(self, DeviceSerialNumber): _DeviceSerialNumber =", "0: raise PhidgetException(result) return _ChannelClassName.value.decode('utf-8') def getChannelName(self): _ChannelName = ctypes.c_char_p()", "= __func(self.handle, _IsRemote) if result > 0: raise PhidgetException(result) def", "0: raise PhidgetException(result) return _DeviceVersion.value def getHub(self): _Hub = ctypes.c_void_p()", "= __func(self.handle, self._onDetach, None) except RuntimeError: self._Detach = None self._onDetach", "> 0: raise PhidgetException(result) return _Channel.value def setChannel(self, Channel): _Channel", "= None def _localErrorEvent(self, handle, userPtr, Code, Description): if self._Error", "ctypes.c_int32(65536) if self.getIsChannel(): __func = PhidgetSupport.getDll().channelInfo else: __func = PhidgetSupport.getDll().deviceInfo", "_Channel.value def setChannel(self, Channel): _Channel = ctypes.c_int(Channel) __func = PhidgetSupport.getDll().Phidget_setChannel", "handle, userPtr, Code, Description): if self._Error == None: return Description", "if result > 0: raise PhidgetException(result) return _ChannelSubclass.value def close(self):", "= PhidgetSupport.getDll().Phidget_resetLibrary __func.restype = ctypes.c_int32 result = __func() if result", "PhidgetSupport.getDll().Phidget_getMaxVINTDeviceSpeed __func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_MaxVINTDeviceSpeed)) if result", "> 0: raise PhidgetException(result) return _IsHubPortDevice.value def setIsHubPortDevice(self, IsHubPortDevice): _IsHubPortDevice", "= Phidget() __Parent.handle = _Parent return __Parent def getServerHostname(self): _ServerHostname", "result > 0: raise PhidgetException(result) return _DeviceVersion.value def getHub(self): _Hub", "__func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_DeviceLabel)) if result >", "getChannelClassName(self): _ChannelClassName = ctypes.c_char_p() __func = PhidgetSupport.getDll().Phidget_getChannelClassName __func.restype = ctypes.c_int32", "PhidgetException(result) return _MaxVINTDeviceSpeed.value def getVINTDeviceSupportsSetSpeed(self): _VINTDeviceSupportsSetSpeed = ctypes.c_int() __func =", "_LibraryVersion = ctypes.c_char_p() __func = PhidgetSupport.getDll().Phidget_getLibraryVersion __func.restype = ctypes.c_int32 result", "_IsRemote = ctypes.c_int() __func = PhidgetSupport.getDll().Phidget_getIsRemote __func.restype = ctypes.c_int32 result", "= ctypes.c_int() __func = PhidgetSupport.getDll().Phidget_getIsChannel __func.restype = ctypes.c_int32 result =", "ctypes.create_string_buffer(ServerName.encode('utf-8')) __func = PhidgetSupport.getDll().Phidget_setServerName __func.restype = ctypes.c_int32 result = __func(self.handle,", "ctypes.c_int32 result = __func(self.handle, _IsLocal) if result > 0: raise", "ctypes.c_int32 result = __func(ctypes.byref(_LibraryVersion)) if result > 0: raise PhidgetException(result)", "handler == None: self._Attach = None self._onAttach = None else:", "> 0: raise PhidgetException(result) return _ServerName.value.decode('utf-8') def setServerName(self, ServerName): _ServerName", "None self._onPropertyChange = None else: self._PropertyChange = handler self._onPropertyChange =", "0: raise PhidgetException(result) return _count.value def getDeviceClass(self): _DeviceClass = ctypes.c_int()", "def getHubPortSupportsSetSpeed(self): _HubPortSupportsSetSpeed = ctypes.c_int() __func = PhidgetSupport.getDll().Phidget_getHubPortSupportsSetSpeed __func.restype =", "if result > 0: raise 
PhidgetException(result) def openWaitForAttachment(self, timeout): _timeout", "_IsRemote = ctypes.c_int(IsRemote) __func = PhidgetSupport.getDll().Phidget_setIsRemote __func.restype = ctypes.c_int32 result", "= ctypes.c_int32 result = __func(self.handle, ctypes.byref(_Parent)) if result > 0:", "_HubPort = ctypes.c_int() __func = PhidgetSupport.getDll().Phidget_getHubPort __func.restype = ctypes.c_int32 result", "= PhidgetSupport.getDll().Phidget_setChannel __func.restype = ctypes.c_int32 result = __func(self.handle, _Channel) if", "def getServerHostname(self): _ServerHostname = ctypes.c_char_p() __func = PhidgetSupport.getDll().Phidget_getServerHostname __func.restype =", "def setOnDetachHandler(self, handler): if handler == None: self._Detach = None", "raise PhidgetException(result) __Hub = Phidget() __Hub.handle = _Hub return __Hub", "result = __func(self.handle, ctypes.byref(_HubPort)) if result > 0: raise PhidgetException(result)", "__func = PhidgetSupport.getDll().Phidget_delete __func.restype = ctypes.c_int32 res = __func(ctypes.byref(self.handle)) self.handle", "= ctypes.c_int32 result = __func(self.handle, ctypes.byref(_ChannelName)) if result > 0:", "= ctypes.CFUNCTYPE(None, ctypes.c_void_p, ctypes.c_void_p) self._Detach = None self._onDetach = None", "raise PhidgetException(result) @staticmethod def getLibraryVersion(): _LibraryVersion = ctypes.c_char_p() __func =", "if result > 0: raise PhidgetException(result) return _ServerName.value.decode('utf-8') def setServerName(self,", "= ctypes.c_int32 result = __func(self.handle, ctypes.byref(_VINTDeviceSupportsSetSpeed)) if result > 0:", "__func(self.handle, ctypes.byref(_Channel)) if result > 0: raise PhidgetException(result) return _Channel.value", "RuntimeError: self._Error = None self._onError = None def _localPropertyChangeEvent(self, handle,", "__func(self.handle, ctypes.byref(_DeviceID)) if result > 0: raise PhidgetException(result) return _DeviceID.value", "__func = PhidgetSupport.getDll().Phidget_getLibraryVersionNumber __func.restype = ctypes.c_int32 result = __func(ctypes.byref(_LibraryVersionNumber)) if", "= PhidgetSupport.getDll().Phidget_getServerUniqueName __func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_ServerUniqueName)) if", "ctypes.c_int() __func = PhidgetSupport.getDll().Phidget_getDeviceID __func.restype = ctypes.c_int32 result = __func(self.handle,", "result > 0: raise PhidgetException(result) return _DeviceSerialNumber.value def setDeviceSerialNumber(self, DeviceSerialNumber):", "None: return propertyName = propertyName.decode('utf-8') self._PropertyChange(self, propertyName) def setOnPropertyChangeHandler(self, handler):", "cls): _cls = ctypes.c_int(cls) _count = ctypes.c_uint32() __func = PhidgetSupport.getDll().Phidget_getDeviceChannelCount", "self._Attach = None self._onAttach = None def _localDetachEvent(self, handle, userPtr):", "raise PhidgetException(result) return _ServerPeerName.value.decode('utf-8') def getServerUniqueName(self): _ServerUniqueName = ctypes.c_char_p() __func", "0: raise PhidgetException(result) return _MaxHubPortSpeed.value def getHubPortSupportsSetSpeed(self): _HubPortSupportsSetSpeed = ctypes.c_int()", "__func.restype = ctypes.c_int32 result = __func(ctypes.byref(_LibraryVersionNumber)) if result > 0:", "0: raise PhidgetException(result) return _DeviceClassName.value.decode('utf-8') def getDeviceID(self): _DeviceID = ctypes.c_int()", "ctypes.byref(_DeviceVersion)) if result > 0: raise PhidgetException(result) return _DeviceVersion.value def", "= 
__func(self.handle) if result > 0: raise PhidgetException(result) def getDeviceChannelCount(self,", "ctypes.byref(_DeviceID)) if result > 0: raise PhidgetException(result) return _DeviceID.value def", "_ChannelSubclass.value def close(self): __func = PhidgetSupport.getDll().Phidget_close __func.restype = ctypes.c_int32 result", "return self.handle.value def __str__(self): _value = (ctypes.c_char * 65536)() _valueLen", "result = __func(self.handle, ctypes.byref(_Hub)) if result > 0: raise PhidgetException(result)", "= ctypes.c_int32 result = __func(ctypes.byref(_LibraryVersionNumber)) if result > 0: raise", "> 0: raise PhidgetException(result) return _HubPortCount.value def getHubPortSpeed(self): _HubPortSpeed =", "= PhidgetSupport.getDll().Phidget_setOnErrorHandler __func.restype = ctypes.c_int32 res = __func(self.handle, self._onError, None)", "self._Detach = None self._onDetach = None else: self._Detach = handler", "= Description.decode('utf-8') self._Error(self, Code, Description) def setOnErrorHandler(self, handler): if handler", "ctypes.c_char_p() __func = PhidgetSupport.getDll().Phidget_getDeviceSKU __func.restype = ctypes.c_int32 result = __func(self.handle,", "return _HubPortCount.value def getHubPortSpeed(self): _HubPortSpeed = ctypes.c_uint32() __func = PhidgetSupport.getDll().Phidget_getHubPortSpeed", "= ctypes.c_int32 result = __func(self.handle, ctypes.byref(_MaxVINTDeviceSpeed)) if result > 0:", "def setOnAttachHandler(self, handler): if handler == None: self._Attach = None", "__func = PhidgetSupport.getDll().Phidget_getServerName __func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_ServerName))", "raise PhidgetException(result) return _Channel.value def setChannel(self, Channel): _Channel = ctypes.c_int(Channel)", "handler): if handler == None: self._Error = None self._onError =", "def getChannelClassName(self): _ChannelClassName = ctypes.c_char_p() __func = PhidgetSupport.getDll().Phidget_getChannelClassName __func.restype =", "Phidget22.PhidgetException import PhidgetException class Phidget: def __init__(self): self.handle = ctypes.c_void_p()", "__func = PhidgetSupport.getDll().deviceInfo result = __func(self.handle, ctypes.byref(_value), _valueLen) return _value.value.decode('utf-", "_localErrorEvent(self, handle, userPtr, Code, Description): if self._Error == None: return", "else: self._Error = handler self._onError = self._ErrorFactory(self._localErrorEvent) try: __func =", "0: raise PhidgetException(result) def getDeviceChannelCount(self, cls): _cls = ctypes.c_int(cls) _count", "raise PhidgetException(result) return _DeviceClassName.value.decode('utf-8') def getDeviceID(self): _DeviceID = ctypes.c_int() __func", "ctypes.c_int32 result = __func(self.handle, ctypes.byref(_DeviceLabel)) if result > 0: raise", "if result > 0: raise PhidgetException(result) return _HubPortSpeed.value def setHubPortSpeed(self,", "ServerName): _ServerName = ctypes.create_string_buffer(ServerName.encode('utf-8')) __func = PhidgetSupport.getDll().Phidget_setServerName __func.restype = ctypes.c_int32", "__hash__(self): return self.handle.value def __str__(self): _value = (ctypes.c_char * 65536)()", "def _localDetachEvent(self, handle, userPtr): if self._Detach == None: return self._Detach(self)", "= _Parent return __Parent def getServerHostname(self): _ServerHostname = ctypes.c_char_p() __func", "ctypes.c_int32 result = __func(self.handle, ctypes.byref(_ServerUniqueName)) if result > 0: raise", "= __func(self.handle, ctypes.byref(_VINTDeviceSupportsSetSpeed)) if result > 0: 
raise PhidgetException(result) return", "result = __func(self.handle, ctypes.byref(_Parent)) if result > 0: raise PhidgetException(result)", "getVINTDeviceSupportsSetSpeed(self): _VINTDeviceSupportsSetSpeed = ctypes.c_int() __func = PhidgetSupport.getDll().Phidget_getVINTDeviceSupportsSetSpeed __func.restype = ctypes.c_int32", "__func = PhidgetSupport.getDll().Phidget_getChannelClassName __func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_ChannelClassName))", "ctypes.c_void_p) self._Detach = None self._onDetach = None if sys.platform ==", "__func(self.handle, _IsRemote) if result > 0: raise PhidgetException(result) def open(self):", "_ServerUniqueName.value.decode('utf-8') def getMaxVINTDeviceSpeed(self): _MaxVINTDeviceSpeed = ctypes.c_uint32() __func = PhidgetSupport.getDll().Phidget_getMaxVINTDeviceSpeed __func.restype", "> 0: raise PhidgetException(result) return _DeviceVersion.value def getHub(self): _Hub =", "self._Error == None: return Description = Description.decode('utf-8') self._Error(self, Code, Description)", "= None self._onAttach = None else: self._Attach = handler self._onAttach", "ctypes.c_int32 result = __func(self.handle, ctypes.byref(_DeviceClassName)) if result > 0: raise", "= ctypes.c_int32 result = __func(self.handle, ctypes.byref(_ChannelClass)) if result > 0:", "IsRemote): _IsRemote = ctypes.c_int(IsRemote) __func = PhidgetSupport.getDll().Phidget_setIsRemote __func.restype = ctypes.c_int32", "ctypes.c_char_p() __func = PhidgetSupport.getDll().Phidget_getDeviceName __func.restype = ctypes.c_int32 result = __func(self.handle,", "__Hub.handle = _Hub return __Hub def getHubPort(self): _HubPort = ctypes.c_int()", "__func(self.handle, ctypes.byref(_DeviceLabel)) if result > 0: raise PhidgetException(result) def getDeviceName(self):", "_Parent = ctypes.c_void_p() __func = PhidgetSupport.getDll().Phidget_getParent __func.restype = ctypes.c_int32 result", "def resetLibrary(): __func = PhidgetSupport.getDll().Phidget_resetLibrary __func.restype = ctypes.c_int32 result =", "DeviceClass from Phidget22.DeviceID import DeviceID from Phidget22.ErrorEventCode import ErrorEventCode from", "ctypes.c_void_p, ctypes.c_void_p, ctypes.c_char_p) else: self._PropertyChangeFactory = ctypes.CFUNCTYPE(None, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_char_p)", "__func(self.handle, ctypes.byref(_ServerHostname)) if result > 0: raise PhidgetException(result) return _ServerHostname.value.decode('utf-8')", "return _DeviceName.value.decode('utf-8') def getDeviceSerialNumber(self): _DeviceSerialNumber = ctypes.c_int32() __func = PhidgetSupport.getDll().Phidget_getDeviceSerialNumber", "def setServerName(self, ServerName): _ServerName = ctypes.create_string_buffer(ServerName.encode('utf-8')) __func = PhidgetSupport.getDll().Phidget_setServerName __func.restype", "return _IsRemote.value def setIsRemote(self, IsRemote): _IsRemote = ctypes.c_int(IsRemote) __func =", "__func(self.handle, ctypes.byref(_ServerName)) if result > 0: raise PhidgetException(result) return _ServerName.value.decode('utf-8')", "= PhidgetSupport.getDll().Phidget_getChannel __func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_Channel)) if", "0: raise PhidgetException(result) return _DeviceSKU.value.decode('utf-8') def getDeviceVersion(self): _DeviceVersion = ctypes.c_int()", "PhidgetSupport.getDll().Phidget_getDeviceClass __func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_DeviceClass)) if result", "getChannelClass(self): _ChannelClass = ctypes.c_int() __func = 
import sys
import ctypes
from Phidget22.PhidgetSupport import PhidgetSupport
from Phidget22.Async import *
from Phidget22.ChannelClass import ChannelClass
from Phidget22.ChannelSubclass import ChannelSubclass
from Phidget22.DeviceClass import DeviceClass
from Phidget22.DeviceID import DeviceID
from Phidget22.ErrorEventCode import ErrorEventCode
from Phidget22.PhidgetException import PhidgetException

class Phidget:
    # Base class for all Phidget channels. Each accessor below resolves the
    # matching C entry point from the native phidget22 library, declares its
    # return type, invokes it, and raises PhidgetException on a non-zero code.
    def __init__(self):
        self.handle = ctypes.c_void_p()

        # Callback factories: WINFUNCTYPE (stdcall) on Windows, CFUNCTYPE
        # (cdecl) elsewhere. The _on* wrapper objects are kept as attributes
        # so the callbacks are not garbage-collected while registered.
        if sys.platform == 'win32':
            self._AttachFactory = ctypes.WINFUNCTYPE(None, ctypes.c_void_p, ctypes.c_void_p)
        else:
            self._AttachFactory = ctypes.CFUNCTYPE(None, ctypes.c_void_p, ctypes.c_void_p)
        self._Attach = None
        self._onAttach = None

        if sys.platform == 'win32':
            self._DetachFactory = ctypes.WINFUNCTYPE(None, ctypes.c_void_p, ctypes.c_void_p)
        else:
            self._DetachFactory = ctypes.CFUNCTYPE(None, ctypes.c_void_p, ctypes.c_void_p)
        self._Detach = None
        self._onDetach = None

        if sys.platform == 'win32':
            self._ErrorFactory = ctypes.WINFUNCTYPE(None, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int, ctypes.c_char_p)
        else:
            self._ErrorFactory = ctypes.CFUNCTYPE(None, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int, ctypes.c_char_p)
        self._Error = None
        self._onError = None

        if sys.platform == 'win32':
            self._PropertyChangeFactory = ctypes.WINFUNCTYPE(None, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_char_p)
        else:
            self._PropertyChangeFactory = ctypes.CFUNCTYPE(None, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_char_p)
        self._PropertyChange = None
        self._onPropertyChange = None

    def __eq__(self, other):
        return hasattr(other, 'handle') and self.handle.value == other.handle.value

    def __hash__(self):
        return self.handle.value

    def __str__(self):
        _value = (ctypes.c_char * 65536)()
        _valueLen = ctypes.c_int32(65536)
        if self.getIsChannel():
            __func = PhidgetSupport.getDll().channelInfo
        else:
            __func = PhidgetSupport.getDll().deviceInfo
        result = __func(self.handle, ctypes.byref(_value), _valueLen)
        return _value.value.decode('utf-8')

    def __del__(self):
        __func = PhidgetSupport.getDll().Phidget_delete
        __func.restype = ctypes.c_int32
        res = __func(ctypes.byref(self.handle))
        self.handle = None
        if res > 0:
            raise PhidgetException(res)

    # Event plumbing: each _local*Event trampoline forwards from the native
    # callback into the user-supplied Python handler.
    def _localAttachEvent(self, handle, userPtr):
        if self._Attach == None:
            return
        self._Attach(self)

    def setOnAttachHandler(self, handler):
        if handler == None:
            self._Attach = None
            self._onAttach = None
        else:
            self._Attach = handler
            self._onAttach = self._AttachFactory(self._localAttachEvent)
        try:
            __func = PhidgetSupport.getDll().Phidget_setOnAttachHandler
            __func.restype = ctypes.c_int32
            res = __func(self.handle, self._onAttach, None)
        except RuntimeError:
            self._Attach = None
            self._onAttach = None

    def _localDetachEvent(self, handle, userPtr):
        if self._Detach == None:
            return
        self._Detach(self)

    def setOnDetachHandler(self, handler):
        if handler == None:
            self._Detach = None
            self._onDetach = None
        else:
            self._Detach = handler
            self._onDetach = self._DetachFactory(self._localDetachEvent)
        try:
            __func = PhidgetSupport.getDll().Phidget_setOnDetachHandler
            __func.restype = ctypes.c_int32
            res = __func(self.handle, self._onDetach, None)
        except RuntimeError:
            self._Detach = None
            self._onDetach = None

    def _localErrorEvent(self, handle, userPtr, Code, Description):
        if self._Error == None:
            return
        Description = Description.decode('utf-8')
        self._Error(self, Code, Description)

    def setOnErrorHandler(self, handler):
        if handler == None:
            self._Error = None
            self._onError = None
        else:
            self._Error = handler
            self._onError = self._ErrorFactory(self._localErrorEvent)
        try:
            __func = PhidgetSupport.getDll().Phidget_setOnErrorHandler
            __func.restype = ctypes.c_int32
            res = __func(self.handle, self._onError, None)
        except RuntimeError:
            self._Error = None
            self._onError = None

    def _localPropertyChangeEvent(self, handle, userPtr, propertyName):
        if self._PropertyChange == None:
            return
        propertyName = propertyName.decode('utf-8')
        self._PropertyChange(self, propertyName)

    def setOnPropertyChangeHandler(self, handler):
        if handler == None:
            self._PropertyChange = None
            self._onPropertyChange = None
        else:
            self._PropertyChange = handler
            self._onPropertyChange = self._PropertyChangeFactory(self._localPropertyChangeEvent)
        try:
            __func = PhidgetSupport.getDll().Phidget_setOnPropertyChangeHandler
            __func.restype = ctypes.c_int32
            res = __func(self.handle, self._onPropertyChange, None)
        except RuntimeError:
            self._PropertyChange = None
            self._onPropertyChange = None

    @staticmethod
    def finalize(flags):
        _flags = ctypes.c_int32(flags)
        __func = PhidgetSupport.getDll().Phidget_finalize
        __func.restype = ctypes.c_int32
        result = __func(_flags)
        if result > 0:
            raise PhidgetException(result)

    @staticmethod
    def getLibraryVersion():
        _LibraryVersion = ctypes.c_char_p()
        __func = PhidgetSupport.getDll().Phidget_getLibraryVersion
        __func.restype = ctypes.c_int32
        result = __func(ctypes.byref(_LibraryVersion))
        if result > 0:
            raise PhidgetException(result)
        return _LibraryVersion.value.decode('utf-8')

    @staticmethod
    def getLibraryVersionNumber():
        _LibraryVersionNumber = ctypes.c_char_p()
        __func = PhidgetSupport.getDll().Phidget_getLibraryVersionNumber
        __func.restype = ctypes.c_int32
        result = __func(ctypes.byref(_LibraryVersionNumber))
        if result > 0:
            raise PhidgetException(result)
        return _LibraryVersionNumber.value.decode('utf-8')

    @staticmethod
    def resetLibrary():
        __func = PhidgetSupport.getDll().Phidget_resetLibrary
        __func.restype = ctypes.c_int32
        result = __func()
        if result > 0:
            raise PhidgetException(result)

    def getAttached(self):
        _Attached = ctypes.c_int()
        __func = PhidgetSupport.getDll().Phidget_getAttached
        __func.restype = ctypes.c_int32
        result = __func(self.handle, ctypes.byref(_Attached))
        if result > 0:
            raise PhidgetException(result)
        return _Attached.value

    def getChannel(self):
        _Channel = ctypes.c_int()
        __func = PhidgetSupport.getDll().Phidget_getChannel
        __func.restype = ctypes.c_int32
        result = __func(self.handle, ctypes.byref(_Channel))
        if result > 0:
            raise PhidgetException(result)
        return _Channel.value

    def setChannel(self, Channel):
        _Channel = ctypes.c_int(Channel)
        __func = PhidgetSupport.getDll().Phidget_setChannel
        __func.restype = ctypes.c_int32
        result = __func(self.handle, _Channel)
        if result > 0:
            raise PhidgetException(result)

    def getChannelClass(self):
        _ChannelClass = ctypes.c_int()
        __func = PhidgetSupport.getDll().Phidget_getChannelClass
        __func.restype = ctypes.c_int32
        result = __func(self.handle, ctypes.byref(_ChannelClass))
        if result > 0:
            raise PhidgetException(result)
        return _ChannelClass.value

    def getChannelClassName(self):
        _ChannelClassName = ctypes.c_char_p()
        __func = PhidgetSupport.getDll().Phidget_getChannelClassName
        __func.restype = ctypes.c_int32
        result = __func(self.handle, ctypes.byref(_ChannelClassName))
        if result > 0:
            raise PhidgetException(result)
        return _ChannelClassName.value.decode('utf-8')

    def getChannelName(self):
        _ChannelName = ctypes.c_char_p()
        __func = PhidgetSupport.getDll().Phidget_getChannelName
        __func.restype = ctypes.c_int32
        result = __func(self.handle, ctypes.byref(_ChannelName))
        if result > 0:
            raise PhidgetException(result)
        return _ChannelName.value.decode('utf-8')

    def getChannelSubclass(self):
        _ChannelSubclass = ctypes.c_int()
        __func = PhidgetSupport.getDll().Phidget_getChannelSubclass
        __func.restype = ctypes.c_int32
        result = __func(self.handle, ctypes.byref(_ChannelSubclass))
        if result > 0:
            raise PhidgetException(result)
        return _ChannelSubclass.value

    def close(self):
        __func = PhidgetSupport.getDll().Phidget_close
        __func.restype = ctypes.c_int32
        result = __func(self.handle)
        if result > 0:
            raise PhidgetException(result)

    def getDeviceChannelCount(self, cls):
        _cls = ctypes.c_int(cls)
        _count = ctypes.c_uint32()
        __func = PhidgetSupport.getDll().Phidget_getDeviceChannelCount
        __func.restype = ctypes.c_int32
        result = __func(self.handle, _cls, ctypes.byref(_count))
        if result > 0:
            raise PhidgetException(result)
        return _count.value

    def getDeviceClass(self):
        _DeviceClass = ctypes.c_int()
        __func = PhidgetSupport.getDll().Phidget_getDeviceClass
        __func.restype = ctypes.c_int32
        result = __func(self.handle, ctypes.byref(_DeviceClass))
        if result > 0:
            raise PhidgetException(result)
        return _DeviceClass.value

    def getDeviceClassName(self):
        _DeviceClassName = ctypes.c_char_p()
        __func = PhidgetSupport.getDll().Phidget_getDeviceClassName
        __func.restype = ctypes.c_int32
        result = __func(self.handle, ctypes.byref(_DeviceClassName))
        if result > 0:
            raise PhidgetException(result)
        return _DeviceClassName.value.decode('utf-8')

    def getDeviceID(self):
        _DeviceID = ctypes.c_int()
        __func = PhidgetSupport.getDll().Phidget_getDeviceID
        __func.restype = ctypes.c_int32
        result = __func(self.handle, ctypes.byref(_DeviceID))
        if result > 0:
            raise PhidgetException(result)
        return _DeviceID.value

    def getDeviceLabel(self):
        _DeviceLabel = ctypes.c_char_p()
        __func = PhidgetSupport.getDll().Phidget_getDeviceLabel
        __func.restype = ctypes.c_int32
        result = __func(self.handle, ctypes.byref(_DeviceLabel))
        if result > 0:
            raise PhidgetException(result)
        return _DeviceLabel.value.decode('utf-8')

    def setDeviceLabel(self, DeviceLabel):
        _DeviceLabel = ctypes.create_string_buffer(DeviceLabel.encode('utf-8'))
        __func = PhidgetSupport.getDll().Phidget_setDeviceLabel
        __func.restype = ctypes.c_int32
        result = __func(self.handle, ctypes.byref(_DeviceLabel))
        if result > 0:
            raise PhidgetException(result)

    def getDeviceName(self):
        _DeviceName = ctypes.c_char_p()
        __func = PhidgetSupport.getDll().Phidget_getDeviceName
        __func.restype = ctypes.c_int32
        result = __func(self.handle, ctypes.byref(_DeviceName))
        if result > 0:
            raise PhidgetException(result)
        return _DeviceName.value.decode('utf-8')

    def getDeviceSerialNumber(self):
        _DeviceSerialNumber = ctypes.c_int32()
        __func = PhidgetSupport.getDll().Phidget_getDeviceSerialNumber
        __func.restype = ctypes.c_int32
        result = __func(self.handle, ctypes.byref(_DeviceSerialNumber))
        if result > 0:
            raise PhidgetException(result)
        return _DeviceSerialNumber.value

    def setDeviceSerialNumber(self, DeviceSerialNumber):
        _DeviceSerialNumber = ctypes.c_int32(DeviceSerialNumber)
        __func = PhidgetSupport.getDll().Phidget_setDeviceSerialNumber
        __func.restype = ctypes.c_int32
        result = __func(self.handle, _DeviceSerialNumber)
        if result > 0:
            raise PhidgetException(result)

    def getDeviceSKU(self):
        _DeviceSKU = ctypes.c_char_p()
        __func = PhidgetSupport.getDll().Phidget_getDeviceSKU
        __func.restype = ctypes.c_int32
        result = __func(self.handle, ctypes.byref(_DeviceSKU))
        if result > 0:
            raise PhidgetException(result)
        return _DeviceSKU.value.decode('utf-8')

    def getDeviceVersion(self):
        _DeviceVersion = ctypes.c_int()
        __func = PhidgetSupport.getDll().Phidget_getDeviceVersion
        __func.restype = ctypes.c_int32
        result = __func(self.handle, ctypes.byref(_DeviceVersion))
        if result > 0:
            raise PhidgetException(result)
        return _DeviceVersion.value

    def getHub(self):
        _Hub = ctypes.c_void_p()
        __func = PhidgetSupport.getDll().Phidget_getHub
        __func.restype = ctypes.c_int32
        result = __func(self.handle, ctypes.byref(_Hub))
        if result > 0:
            raise PhidgetException(result)
        __Hub = Phidget()
        __Hub.handle = _Hub
        return __Hub

    def getHubPort(self):
        _HubPort = ctypes.c_int()
        __func = PhidgetSupport.getDll().Phidget_getHubPort
        __func.restype = ctypes.c_int32
        result = __func(self.handle, ctypes.byref(_HubPort))
        if result > 0:
            raise PhidgetException(result)
        return _HubPort.value

    def setHubPort(self, HubPort):
        _HubPort = ctypes.c_int(HubPort)
        __func = PhidgetSupport.getDll().Phidget_setHubPort
        __func.restype = ctypes.c_int32
        result = __func(self.handle, _HubPort)
        if result > 0:
            raise PhidgetException(result)

    def getHubPortCount(self):
        _HubPortCount = ctypes.c_int()
        __func = PhidgetSupport.getDll().Phidget_getHubPortCount
        __func.restype = ctypes.c_int32
        result = __func(self.handle, ctypes.byref(_HubPortCount))
        if result > 0:
            raise PhidgetException(result)
        return _HubPortCount.value

    def getHubPortSpeed(self):
        _HubPortSpeed = ctypes.c_uint32()
        __func = PhidgetSupport.getDll().Phidget_getHubPortSpeed
        __func.restype = ctypes.c_int32
        result = __func(self.handle, ctypes.byref(_HubPortSpeed))
        if result > 0:
            raise PhidgetException(result)
        return _HubPortSpeed.value

    def setHubPortSpeed(self, HubPortSpeed):
        _HubPortSpeed = ctypes.c_uint32(HubPortSpeed)
        __func = PhidgetSupport.getDll().Phidget_setHubPortSpeed
        __func.restype = ctypes.c_int32
        result = __func(self.handle, _HubPortSpeed)
        if result > 0:
            raise PhidgetException(result)

    def getMaxHubPortSpeed(self):
        _MaxHubPortSpeed = ctypes.c_uint32()
        __func = PhidgetSupport.getDll().Phidget_getMaxHubPortSpeed
        __func.restype = ctypes.c_int32
        result = __func(self.handle, ctypes.byref(_MaxHubPortSpeed))
        if result > 0:
            raise PhidgetException(result)
        return _MaxHubPortSpeed.value

    def getHubPortSupportsSetSpeed(self):
        _HubPortSupportsSetSpeed = ctypes.c_int()
        __func = PhidgetSupport.getDll().Phidget_getHubPortSupportsSetSpeed
        __func.restype = ctypes.c_int32
        result = __func(self.handle, ctypes.byref(_HubPortSupportsSetSpeed))
        if result > 0:
            raise PhidgetException(result)
        return _HubPortSupportsSetSpeed.value

    def getIsChannel(self):
        _IsChannel = ctypes.c_int()
        __func = PhidgetSupport.getDll().Phidget_getIsChannel
        __func.restype = ctypes.c_int32
        result = __func(self.handle, ctypes.byref(_IsChannel))
        if result > 0:
            raise PhidgetException(result)
        return _IsChannel.value

    def getIsHubPortDevice(self):
        _IsHubPortDevice = ctypes.c_int()
        __func = PhidgetSupport.getDll().Phidget_getIsHubPortDevice
        __func.restype = ctypes.c_int32
        result = __func(self.handle, ctypes.byref(_IsHubPortDevice))
        if result > 0:
            raise PhidgetException(result)
        return _IsHubPortDevice.value

    def setIsHubPortDevice(self, IsHubPortDevice):
        _IsHubPortDevice = ctypes.c_int(IsHubPortDevice)
        __func = PhidgetSupport.getDll().Phidget_setIsHubPortDevice
        __func.restype = ctypes.c_int32
        result = __func(self.handle, _IsHubPortDevice)
        if result > 0:
            raise PhidgetException(result)

    def getIsLocal(self):
        _IsLocal = ctypes.c_int()
        __func = PhidgetSupport.getDll().Phidget_getIsLocal
        __func.restype = ctypes.c_int32
        result = __func(self.handle, ctypes.byref(_IsLocal))
        if result > 0:
            raise PhidgetException(result)
        return _IsLocal.value

    def setIsLocal(self, IsLocal):
        _IsLocal = ctypes.c_int(IsLocal)
        __func = PhidgetSupport.getDll().Phidget_setIsLocal
        __func.restype = ctypes.c_int32
        result = __func(self.handle, _IsLocal)
        if result > 0:
            raise PhidgetException(result)

    def getIsRemote(self):
        _IsRemote = ctypes.c_int()
        __func = PhidgetSupport.getDll().Phidget_getIsRemote
        __func.restype = ctypes.c_int32
        result = __func(self.handle, ctypes.byref(_IsRemote))
        if result > 0:
            raise PhidgetException(result)
        return _IsRemote.value

    def setIsRemote(self, IsRemote):
        _IsRemote = ctypes.c_int(IsRemote)
        __func = PhidgetSupport.getDll().Phidget_setIsRemote
        __func.restype = ctypes.c_int32
        result = __func(self.handle, _IsRemote)
        if result > 0:
            raise PhidgetException(result)

    def open(self):
        __func = PhidgetSupport.getDll().Phidget_open
        __func.restype = ctypes.c_int32
        result = __func(self.handle)
        if result > 0:
            raise PhidgetException(result)

    def openWaitForAttachment(self, timeout):
        _timeout = ctypes.c_uint32(timeout)
        __func = PhidgetSupport.getDll().Phidget_openWaitForAttachment
        __func.restype = ctypes.c_int32
        result = __func(self.handle, _timeout)
        if result > 0:
            raise PhidgetException(result)

    def getParent(self):
        _Parent = ctypes.c_void_p()
        __func = PhidgetSupport.getDll().Phidget_getParent
        __func.restype = ctypes.c_int32
        result = __func(self.handle, ctypes.byref(_Parent))
        if result > 0:
            raise PhidgetException(result)
        __Parent = Phidget()
        __Parent.handle = _Parent
        return __Parent

    def getServerHostname(self):
        _ServerHostname = ctypes.c_char_p()
        __func = PhidgetSupport.getDll().Phidget_getServerHostname
        __func.restype = ctypes.c_int32
        result = __func(self.handle, ctypes.byref(_ServerHostname))
        if result > 0:
            raise PhidgetException(result)
        return _ServerHostname.value.decode('utf-8')

    def getServerName(self):
        _ServerName = ctypes.c_char_p()
        __func = PhidgetSupport.getDll().Phidget_getServerName
        __func.restype = ctypes.c_int32
        result = __func(self.handle, ctypes.byref(_ServerName))
        if result > 0:
            raise PhidgetException(result)
        return _ServerName.value.decode('utf-8')

    def setServerName(self, ServerName):
        _ServerName = ctypes.create_string_buffer(ServerName.encode('utf-8'))
        __func = PhidgetSupport.getDll().Phidget_setServerName
        __func.restype = ctypes.c_int32
        result = __func(self.handle, ctypes.byref(_ServerName))
        if result > 0:
            raise PhidgetException(result)

    def getServerPeerName(self):
        _ServerPeerName = ctypes.c_char_p()
        __func = PhidgetSupport.getDll().Phidget_getServerPeerName
        __func.restype = ctypes.c_int32
        result = __func(self.handle, ctypes.byref(_ServerPeerName))
        if result > 0:
            raise PhidgetException(result)
        return _ServerPeerName.value.decode('utf-8')

    def getServerUniqueName(self):
        _ServerUniqueName = ctypes.c_char_p()
        __func = PhidgetSupport.getDll().Phidget_getServerUniqueName
        __func.restype = ctypes.c_int32
        result = __func(self.handle, ctypes.byref(_ServerUniqueName))
        if result > 0:
            raise PhidgetException(result)
        return _ServerUniqueName.value.decode('utf-8')

    def getMaxVINTDeviceSpeed(self):
        _MaxVINTDeviceSpeed = ctypes.c_uint32()
        __func = PhidgetSupport.getDll().Phidget_getMaxVINTDeviceSpeed
        __func.restype = ctypes.c_int32
        result = __func(self.handle, ctypes.byref(_MaxVINTDeviceSpeed))
        if result > 0:
            raise PhidgetException(result)
        return _MaxVINTDeviceSpeed.value

    def getVINTDeviceSupportsSetSpeed(self):
        _VINTDeviceSupportsSetSpeed = ctypes.c_int()
        __func = PhidgetSupport.getDll().Phidget_getVINTDeviceSupportsSetSpeed
        __func.restype = ctypes.c_int32
        result = __func(self.handle, ctypes.byref(_VINTDeviceSupportsSetSpeed))
        if result > 0:
            raise PhidgetException(result)
        return _VINTDeviceSupportsSetSpeed.value

    def writeDeviceLabel(self, deviceLabel):
        _deviceLabel = ctypes.create_string_buffer(deviceLabel.encode('utf-8'))
        __func = PhidgetSupport.getDll().Phidget_writeDeviceLabel
        __func.restype = ctypes.c_int32
        result = __func(self.handle, ctypes.byref(_deviceLabel))
        if result > 0:
            raise PhidgetException(result)

    ANY_SERIAL_NUMBER = -1
    ANY_HUB_PORT = -1
    ANY_CHANNEL = -1
    ANY_LABEL = None
    INFINITE_TIMEOUT = 0
    DEFAULT_TIMEOUT = 1000
None def _localErrorEvent(self, handle, userPtr, Code, Description):", "raise PhidgetException(result) def getMaxHubPortSpeed(self): _MaxHubPortSpeed = ctypes.c_uint32() __func = PhidgetSupport.getDll().Phidget_getMaxHubPortSpeed", "= PhidgetSupport.getDll().Phidget_getChannelClassName __func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_ChannelClassName)) if", "if result > 0: raise PhidgetException(result) def open(self): __func =", "class Phidget: def __init__(self): self.handle = ctypes.c_void_p() if sys.platform ==", "def openWaitForAttachment(self, timeout): _timeout = ctypes.c_uint32(timeout) __func = PhidgetSupport.getDll().Phidget_openWaitForAttachment __func.restype", "from Phidget22.DeviceClass import DeviceClass from Phidget22.DeviceID import DeviceID from Phidget22.ErrorEventCode", "PhidgetException(result) return _HubPortCount.value def getHubPortSpeed(self): _HubPortSpeed = ctypes.c_uint32() __func =", "__func(self.handle, ctypes.byref(_DeviceSerialNumber)) if result > 0: raise PhidgetException(result) return _DeviceSerialNumber.value", "0: raise PhidgetException(result) def getDeviceSKU(self): _DeviceSKU = ctypes.c_char_p() __func =", "PhidgetException(result) def getAttached(self): _Attached = ctypes.c_int() __func = PhidgetSupport.getDll().Phidget_getAttached __func.restype", "self._Error = handler self._onError = self._ErrorFactory(self._localErrorEvent) try: __func = PhidgetSupport.getDll().Phidget_setOnErrorHandler", "setServerName(self, ServerName): _ServerName = ctypes.create_string_buffer(ServerName.encode('utf-8')) __func = PhidgetSupport.getDll().Phidget_setServerName __func.restype =", "ctypes.c_void_p, ctypes.c_int, ctypes.c_char_p) else: self._ErrorFactory = ctypes.CFUNCTYPE(None, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int,", "return _ChannelClass.value def getChannelClassName(self): _ChannelClassName = ctypes.c_char_p() __func = PhidgetSupport.getDll().Phidget_getChannelClassName", "ctypes.c_char_p) else: self._ErrorFactory = ctypes.CFUNCTYPE(None, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int, ctypes.c_char_p) self._Error", "close(self): __func = PhidgetSupport.getDll().Phidget_close __func.restype = ctypes.c_int32 result = __func(self.handle)", "_HubPortCount.value def getHubPortSpeed(self): _HubPortSpeed = ctypes.c_uint32() __func = PhidgetSupport.getDll().Phidget_getHubPortSpeed __func.restype", "PhidgetException(result) def getParent(self): _Parent = ctypes.c_void_p() __func = PhidgetSupport.getDll().Phidget_getParent __func.restype", "if result > 0: raise PhidgetException(result) return _LibraryVersionNumber.value.decode('utf-8') @staticmethod def", "_Attached = ctypes.c_int() __func = PhidgetSupport.getDll().Phidget_getAttached __func.restype = ctypes.c_int32 result", "0: raise PhidgetException(result) __Parent = Phidget() __Parent.handle = _Parent return", "other): return hasattr(other, 'handle') and self.handle.value == other.handle.value def __hash__(self):", "Phidget() __Hub.handle = _Hub return __Hub def getHubPort(self): _HubPort =", "= self._AttachFactory(self._localAttachEvent) try: __func = PhidgetSupport.getDll().Phidget_setOnAttachHandler __func.restype = ctypes.c_int32 res", "0: raise PhidgetException(result) def getParent(self): _Parent = ctypes.c_void_p() __func =", "result = __func(self.handle, ctypes.byref(_DeviceName)) if result > 0: raise PhidgetException(result)", "self._onDetach = self._DetachFactory(self._localDetachEvent) try: __func = PhidgetSupport.getDll().Phidget_setOnDetachHandler __func.restype = 
ctypes.c_int32", "ctypes.c_int(IsLocal) __func = PhidgetSupport.getDll().Phidget_setIsLocal __func.restype = ctypes.c_int32 result = __func(self.handle,", "_DeviceID.value def getDeviceLabel(self): _DeviceLabel = ctypes.c_char_p() __func = PhidgetSupport.getDll().Phidget_getDeviceLabel __func.restype", "= ctypes.c_int(Channel) __func = PhidgetSupport.getDll().Phidget_setChannel __func.restype = ctypes.c_int32 result =", "return _IsLocal.value def setIsLocal(self, IsLocal): _IsLocal = ctypes.c_int(IsLocal) __func =", "= PhidgetSupport.getDll().Phidget_getDeviceSerialNumber __func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_DeviceSerialNumber)) if", "result = __func(self.handle, ctypes.byref(_MaxVINTDeviceSpeed)) if result > 0: raise PhidgetException(result)", "self.handle.value def __str__(self): _value = (ctypes.c_char * 65536)() _valueLen =", "= __func(self.handle, _Channel) if result > 0: raise PhidgetException(result) def", "__func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_ServerName)) if result >", "handle, userPtr): if self._Attach == None: return self._Attach(self) def setOnAttachHandler(self,", "__func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_MaxVINTDeviceSpeed)) if result >", "__func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_DeviceSKU)) if result >", "ctypes.c_int32 result = __func(self.handle, ctypes.byref(_MaxVINTDeviceSpeed)) if result > 0: raise", "setOnDetachHandler(self, handler): if handler == None: self._Detach = None self._onDetach", "Description) def setOnErrorHandler(self, handler): if handler == None: self._Error =", "__func(self.handle, ctypes.byref(_HubPortCount)) if result > 0: raise PhidgetException(result) return _HubPortCount.value", "-1 ANY_CHANNEL = -1 ANY_LABEL = None INFINITE_TIMEOUT = 0", "= PhidgetSupport.getDll().Phidget_getServerPeerName __func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_ServerPeerName)) if", "= __func(self.handle, ctypes.byref(_HubPortSupportsSetSpeed)) if result > 0: raise PhidgetException(result) return", "raise PhidgetException(result) def getServerPeerName(self): _ServerPeerName = ctypes.c_char_p() __func = PhidgetSupport.getDll().Phidget_getServerPeerName", "__func = PhidgetSupport.getDll().Phidget_getMaxVINTDeviceSpeed __func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_MaxVINTDeviceSpeed))", "__func = PhidgetSupport.getDll().Phidget_getMaxHubPortSpeed __func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_MaxHubPortSpeed))", "PhidgetSupport.getDll().Phidget_setOnDetachHandler __func.restype = ctypes.c_int32 res = __func(self.handle, self._onDetach, None) except", "Description.decode('utf-8') self._Error(self, Code, Description) def setOnErrorHandler(self, handler): if handler ==", "= __func(self.handle, ctypes.byref(_DeviceID)) if result > 0: raise PhidgetException(result) return", "__func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_ServerHostname)) if result >", "_localDetachEvent(self, handle, userPtr): if self._Detach == None: return self._Detach(self) def", "= PhidgetSupport.getDll().Phidget_getIsHubPortDevice __func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_IsHubPortDevice)) if", "= __func(self.handle, _IsLocal) if result > 0: raise PhidgetException(result) def", "ctypes.c_char_p() __func = PhidgetSupport.getDll().Phidget_getServerUniqueName __func.restype = ctypes.c_int32 result = __func(self.handle,", 
"ctypes.c_int32 result = __func(self.handle, ctypes.byref(_IsLocal)) if result > 0: raise", "self._onDetach, None) except RuntimeError: self._Detach = None self._onDetach = None", "if result > 0: raise PhidgetException(result) return _VINTDeviceSupportsSetSpeed.value def writeDeviceLabel(self,", "= PhidgetSupport.getDll().Phidget_getDeviceClassName __func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_DeviceClassName)) if", "= ctypes.c_int32 result = __func(self.handle, ctypes.byref(_Hub)) if result > 0:", "self._PropertyChange = handler self._onPropertyChange = self._PropertyChangeFactory(self._localPropertyChangeEvent) try: __func = PhidgetSupport.getDll().Phidget_setOnPropertyChangeHandler", "if result > 0: raise PhidgetException(result) return _Attached.value def getChannel(self):", "raise PhidgetException(result) return _ChannelClass.value def getChannelClassName(self): _ChannelClassName = ctypes.c_char_p() __func", "_DeviceSKU = ctypes.c_char_p() __func = PhidgetSupport.getDll().Phidget_getDeviceSKU __func.restype = ctypes.c_int32 result", "== None: self._Error = None self._onError = None else: self._Error", "raise PhidgetException(result) def getChannelClass(self): _ChannelClass = ctypes.c_int() __func = PhidgetSupport.getDll().Phidget_getChannelClass", "def getHub(self): _Hub = ctypes.c_void_p() __func = PhidgetSupport.getDll().Phidget_getHub __func.restype =", "@staticmethod def getLibraryVersion(): _LibraryVersion = ctypes.c_char_p() __func = PhidgetSupport.getDll().Phidget_getLibraryVersion __func.restype", "setHubPortSpeed(self, HubPortSpeed): _HubPortSpeed = ctypes.c_uint32(HubPortSpeed) __func = PhidgetSupport.getDll().Phidget_setHubPortSpeed __func.restype =", "PhidgetSupport.getDll().Phidget_getDeviceID __func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_DeviceID)) if result", "_DeviceVersion.value def getHub(self): _Hub = ctypes.c_void_p() __func = PhidgetSupport.getDll().Phidget_getHub __func.restype", "ctypes.c_void_p) else: self._AttachFactory = ctypes.CFUNCTYPE(None, ctypes.c_void_p, ctypes.c_void_p) self._Attach = None", "> 0: raise PhidgetException(result) def getParent(self): _Parent = ctypes.c_void_p() __func", "None) except RuntimeError: self._Error = None self._onError = None def", "if result > 0: raise PhidgetException(result) return _DeviceLabel.value.decode('utf-8') def setDeviceLabel(self,", "result > 0: raise PhidgetException(result) return _DeviceLabel.value.decode('utf-8') def setDeviceLabel(self, DeviceLabel):", "= PhidgetSupport.getDll().Phidget_openWaitForAttachment __func.restype = ctypes.c_int32 result = __func(self.handle, _timeout) if", "def writeDeviceLabel(self, deviceLabel): _deviceLabel = ctypes.create_string_buffer(deviceLabel.encode('utf-8')) __func = PhidgetSupport.getDll().Phidget_writeDeviceLabel __func.restype", "__func.restype = ctypes.c_int32 result = __func(_flags) if result > 0:", "__func = PhidgetSupport.getDll().Phidget_getIsChannel __func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_IsChannel))", "_MaxHubPortSpeed.value def getHubPortSupportsSetSpeed(self): _HubPortSupportsSetSpeed = ctypes.c_int() __func = PhidgetSupport.getDll().Phidget_getHubPortSupportsSetSpeed __func.restype", "0: raise PhidgetException(res) def _localAttachEvent(self, handle, userPtr): if self._Attach ==", "getIsRemote(self): _IsRemote = ctypes.c_int() __func = PhidgetSupport.getDll().Phidget_getIsRemote __func.restype = ctypes.c_int32", "PhidgetException(result) return _Channel.value def 
setChannel(self, Channel): _Channel = ctypes.c_int(Channel) __func", "PhidgetSupport.getDll().Phidget_getChannelClassName __func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_ChannelClassName)) if result", "PhidgetException(result) return _IsChannel.value def getIsHubPortDevice(self): _IsHubPortDevice = ctypes.c_int() __func =", "ctypes.byref(_deviceLabel)) if result > 0: raise PhidgetException(result) ANY_SERIAL_NUMBER = -1", "if self._Error == None: return Description = Description.decode('utf-8') self._Error(self, Code,", "result = __func(self.handle, ctypes.byref(_DeviceClass)) if result > 0: raise PhidgetException(result)", "__func(ctypes.byref(_LibraryVersion)) if result > 0: raise PhidgetException(result) return _LibraryVersion.value.decode('utf-8') @staticmethod", "= None self._onDetach = None else: self._Detach = handler self._onDetach", "_IsRemote) if result > 0: raise PhidgetException(result) def open(self): __func", "from Phidget22.DeviceID import DeviceID from Phidget22.ErrorEventCode import ErrorEventCode from Phidget22.PhidgetException", "* from Phidget22.ChannelClass import ChannelClass from Phidget22.ChannelSubclass import ChannelSubclass from", "= ctypes.c_int(IsRemote) __func = PhidgetSupport.getDll().Phidget_setIsRemote __func.restype = ctypes.c_int32 result =", "def setOnPropertyChangeHandler(self, handler): if handler == None: self._PropertyChange = None", "= __func(self.handle, ctypes.byref(_DeviceLabel)) if result > 0: raise PhidgetException(result) return", "== None: return self._Attach(self) def setOnAttachHandler(self, handler): if handler ==", "ctypes.c_int32 result = __func(self.handle, ctypes.byref(_ChannelClass)) if result > 0: raise", "result > 0: raise PhidgetException(result) return _count.value def getDeviceClass(self): _DeviceClass", "PhidgetSupport.getDll().Phidget_setOnPropertyChangeHandler __func.restype = ctypes.c_int32 res = __func(self.handle, self._onPropertyChange, None) except", "if result > 0: raise PhidgetException(result) return _LibraryVersion.value.decode('utf-8') @staticmethod def", "= PhidgetSupport.getDll().Phidget_getLibraryVersionNumber __func.restype = ctypes.c_int32 result = __func(ctypes.byref(_LibraryVersionNumber)) if result", "self._Attach = None self._onAttach = None if sys.platform == 'win32':", "PhidgetException(result) return _DeviceClassName.value.decode('utf-8') def getDeviceID(self): _DeviceID = ctypes.c_int() __func =", "import sys import ctypes from Phidget22.PhidgetSupport import PhidgetSupport from Phidget22.Async", "_valueLen) return _value.value.decode('utf- 8') def __del__(self): __func = PhidgetSupport.getDll().Phidget_delete __func.restype", "def getIsLocal(self): _IsLocal = ctypes.c_int() __func = PhidgetSupport.getDll().Phidget_getIsLocal __func.restype =", "PhidgetSupport.getDll().Phidget_getHubPortCount __func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_HubPortCount)) if result", "handler == None: self._Detach = None self._onDetach = None else:", "PhidgetException(result) return _MaxHubPortSpeed.value def getHubPortSupportsSetSpeed(self): _HubPortSupportsSetSpeed = ctypes.c_int() __func =", "_ServerUniqueName = ctypes.c_char_p() __func = PhidgetSupport.getDll().Phidget_getServerUniqueName __func.restype = ctypes.c_int32 result", "if sys.platform == 'win32': self._ErrorFactory = ctypes.WINFUNCTYPE(None, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int,", "PhidgetSupport.getDll().Phidget_setHubPort __func.restype = ctypes.c_int32 result = __func(self.handle, _HubPort) 
if result", "if result > 0: raise PhidgetException(result) return _ChannelClass.value def getChannelClassName(self):", "= PhidgetSupport.getDll().Phidget_getParent __func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_Parent)) if", "__Parent = Phidget() __Parent.handle = _Parent return __Parent def getServerHostname(self):", "propertyName) def setOnPropertyChangeHandler(self, handler): if handler == None: self._PropertyChange =", "self._onDetach = None else: self._Detach = handler self._onDetach = self._DetachFactory(self._localDetachEvent)", "= PhidgetSupport.getDll().Phidget_getDeviceClass __func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_DeviceClass)) if", "getServerName(self): _ServerName = ctypes.c_char_p() __func = PhidgetSupport.getDll().Phidget_getServerName __func.restype = ctypes.c_int32", "ctypes.c_int() __func = PhidgetSupport.getDll().Phidget_getHubPortCount __func.restype = ctypes.c_int32 result = __func(self.handle,", "PhidgetException(result) def getDeviceName(self): _DeviceName = ctypes.c_char_p() __func = PhidgetSupport.getDll().Phidget_getDeviceName __func.restype", "def getDeviceSerialNumber(self): _DeviceSerialNumber = ctypes.c_int32() __func = PhidgetSupport.getDll().Phidget_getDeviceSerialNumber __func.restype =", "> 0: raise PhidgetException(result) return _ServerPeerName.value.decode('utf-8') def getServerUniqueName(self): _ServerUniqueName =", "self._onPropertyChange = self._PropertyChangeFactory(self._localPropertyChangeEvent) try: __func = PhidgetSupport.getDll().Phidget_setOnPropertyChangeHandler __func.restype = ctypes.c_int32", "PhidgetSupport.getDll().Phidget_setChannel __func.restype = ctypes.c_int32 result = __func(self.handle, _Channel) if result", "= None self._onError = None if sys.platform == 'win32': self._PropertyChangeFactory", "= ctypes.c_int32 result = __func(self.handle, ctypes.byref(_ServerHostname)) if result > 0:", "0: raise PhidgetException(result) return _MaxVINTDeviceSpeed.value def getVINTDeviceSupportsSetSpeed(self): _VINTDeviceSupportsSetSpeed = ctypes.c_int()", "= ctypes.create_string_buffer(deviceLabel.encode('utf-8')) __func = PhidgetSupport.getDll().Phidget_writeDeviceLabel __func.restype = ctypes.c_int32 result =", "None: return self._Detach(self) def setOnDetachHandler(self, handler): if handler == None:", "PhidgetSupport.getDll().Phidget_getVINTDeviceSupportsSetSpeed __func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_VINTDeviceSupportsSetSpeed)) if result", "__func = PhidgetSupport.getDll().Phidget_getDeviceChannelCount __func.restype = ctypes.c_int32 result = __func(self.handle, _cls,", "return self._Attach(self) def setOnAttachHandler(self, handler): if handler == None: self._Attach", "= ctypes.c_int32 res = __func(self.handle, self._onPropertyChange, None) except RuntimeError: self._PropertyChange", "_DeviceID = ctypes.c_int() __func = PhidgetSupport.getDll().Phidget_getDeviceID __func.restype = ctypes.c_int32 result", "result > 0: raise PhidgetException(result) return _ChannelClass.value def getChannelClassName(self): _ChannelClassName", "None self._onDetach = None else: self._Detach = handler self._onDetach =", "def getChannelClass(self): _ChannelClass = ctypes.c_int() __func = PhidgetSupport.getDll().Phidget_getChannelClass __func.restype =", "PhidgetException(result) return _LibraryVersion.value.decode('utf-8') @staticmethod def getLibraryVersionNumber(): _LibraryVersionNumber = ctypes.c_char_p() __func", "= PhidgetSupport.getDll().Phidget_getIsLocal 
__func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_IsLocal)) if", "ctypes.c_int32 result = __func(self.handle, ctypes.byref(_ChannelClassName)) if result > 0: raise", "= self._ErrorFactory(self._localErrorEvent) try: __func = PhidgetSupport.getDll().Phidget_setOnErrorHandler __func.restype = ctypes.c_int32 res", "PhidgetException(result) return _Attached.value def getChannel(self): _Channel = ctypes.c_int() __func =", "PhidgetSupport.getDll().Phidget_getIsRemote __func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_IsRemote)) if result", "self._ErrorFactory = ctypes.WINFUNCTYPE(None, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int, ctypes.c_char_p) else: self._ErrorFactory =", "PhidgetSupport.getDll().Phidget_getDeviceSerialNumber __func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_DeviceSerialNumber)) if result", "__func(self.handle, ctypes.byref(_MaxVINTDeviceSpeed)) if result > 0: raise PhidgetException(result) return _MaxVINTDeviceSpeed.value", "result > 0: raise PhidgetException(result) def openWaitForAttachment(self, timeout): _timeout =", "getDeviceClassName(self): _DeviceClassName = ctypes.c_char_p() __func = PhidgetSupport.getDll().Phidget_getDeviceClassName __func.restype = ctypes.c_int32", "PhidgetException(result) return _DeviceSerialNumber.value def setDeviceSerialNumber(self, DeviceSerialNumber): _DeviceSerialNumber = ctypes.c_int32(DeviceSerialNumber) __func", "= ctypes.c_int32 result = __func(self.handle, ctypes.byref(_ChannelClassName)) if result > 0:", "setIsLocal(self, IsLocal): _IsLocal = ctypes.c_int(IsLocal) __func = PhidgetSupport.getDll().Phidget_setIsLocal __func.restype =", "result > 0: raise PhidgetException(result) return _LibraryVersionNumber.value.decode('utf-8') @staticmethod def resetLibrary():", "ctypes.c_void_p, ctypes.c_char_p) self._PropertyChange = None self._onPropertyChange = None def __eq__(self,", "__func(self.handle, self._onAttach, None) except RuntimeError: self._Attach = None self._onAttach =", "Phidget() __Parent.handle = _Parent return __Parent def getServerHostname(self): _ServerHostname =", "0: raise PhidgetException(result) return _LibraryVersionNumber.value.decode('utf-8') @staticmethod def resetLibrary(): __func =", "__func() if result > 0: raise PhidgetException(result) def getAttached(self): _Attached", "= None self._onPropertyChange = None def __eq__(self, other): return hasattr(other,", "__func = PhidgetSupport.getDll().channelInfo else: __func = PhidgetSupport.getDll().deviceInfo result = __func(self.handle,", "PhidgetSupport.getDll().Phidget_getHubPortSpeed __func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_HubPortSpeed)) if result", "return _HubPortSupportsSetSpeed.value def getIsChannel(self): _IsChannel = ctypes.c_int() __func = PhidgetSupport.getDll().Phidget_getIsChannel", "def __str__(self): _value = (ctypes.c_char * 65536)() _valueLen = ctypes.c_int32(65536)", "ctypes.c_void_p, ctypes.c_void_p, ctypes.c_char_p) self._PropertyChange = None self._onPropertyChange = None def", "if self._PropertyChange == None: return propertyName = propertyName.decode('utf-8') self._PropertyChange(self, propertyName)", "handler): if handler == None: self._Attach = None self._onAttach =", "PhidgetSupport.getDll().Phidget_getDeviceClassName __func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_DeviceClassName)) if result", "ctypes.byref(_HubPort)) if result > 0: raise PhidgetException(result) return _HubPort.value def", "== None: 
self._PropertyChange = None self._onPropertyChange = None else: self._PropertyChange", "try: __func = PhidgetSupport.getDll().Phidget_setOnErrorHandler __func.restype = ctypes.c_int32 res = __func(self.handle,", "return _LibraryVersion.value.decode('utf-8') @staticmethod def getLibraryVersionNumber(): _LibraryVersionNumber = ctypes.c_char_p() __func =", "getHubPort(self): _HubPort = ctypes.c_int() __func = PhidgetSupport.getDll().Phidget_getHubPort __func.restype = ctypes.c_int32", "raise PhidgetException(result) return _LibraryVersionNumber.value.decode('utf-8') @staticmethod def resetLibrary(): __func = PhidgetSupport.getDll().Phidget_resetLibrary", "0: raise PhidgetException(result) return _IsChannel.value def getIsHubPortDevice(self): _IsHubPortDevice = ctypes.c_int()", "try: __func = PhidgetSupport.getDll().Phidget_setOnAttachHandler __func.restype = ctypes.c_int32 res = __func(self.handle,", "return _Channel.value def setChannel(self, Channel): _Channel = ctypes.c_int(Channel) __func =", "_Parent return __Parent def getServerHostname(self): _ServerHostname = ctypes.c_char_p() __func =", "__func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_Channel)) if result >", "ctypes.byref(_Hub)) if result > 0: raise PhidgetException(result) __Hub = Phidget()", "getDeviceSKU(self): _DeviceSKU = ctypes.c_char_p() __func = PhidgetSupport.getDll().Phidget_getDeviceSKU __func.restype = ctypes.c_int32", "result > 0: raise PhidgetException(result) return _ServerPeerName.value.decode('utf-8') def getServerUniqueName(self): _ServerUniqueName", "ctypes.c_void_p) else: self._DetachFactory = ctypes.CFUNCTYPE(None, ctypes.c_void_p, ctypes.c_void_p) self._Detach = None", "result = __func(self.handle, ctypes.byref(_ChannelClassName)) if result > 0: raise PhidgetException(result)", "__func = PhidgetSupport.getDll().Phidget_open __func.restype = ctypes.c_int32 result = __func(self.handle) if", "if result > 0: raise PhidgetException(result) return _ChannelClassName.value.decode('utf-8') def getChannelName(self):", "__func(self.handle, ctypes.byref(_IsLocal)) if result > 0: raise PhidgetException(result) return _IsLocal.value", "0: raise PhidgetException(result) def open(self): __func = PhidgetSupport.getDll().Phidget_open __func.restype =", "> 0: raise PhidgetException(result) return _ServerHostname.value.decode('utf-8') def getServerName(self): _ServerName =", "= PhidgetSupport.getDll().Phidget_getHubPortSupportsSetSpeed __func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_HubPortSupportsSetSpeed)) if", "if result > 0: raise PhidgetException(result) return _HubPortSupportsSetSpeed.value def getIsChannel(self):", "self._PropertyChangeFactory = ctypes.WINFUNCTYPE(None, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_char_p) else: self._PropertyChangeFactory = ctypes.CFUNCTYPE(None,", "__func(self.handle, ctypes.byref(_ServerPeerName)) if result > 0: raise PhidgetException(result) return _ServerPeerName.value.decode('utf-8')", "__func = PhidgetSupport.getDll().Phidget_getServerUniqueName __func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_ServerUniqueName))", "__func(self.handle, ctypes.byref(_DeviceClass)) if result > 0: raise PhidgetException(result) return _DeviceClass.value", "if result > 0: raise PhidgetException(result) def getDeviceName(self): _DeviceName =", "ANY_HUB_PORT = -1 ANY_CHANNEL = -1 ANY_LABEL = None INFINITE_TIMEOUT", "getDeviceClass(self): _DeviceClass = ctypes.c_int() __func = PhidgetSupport.getDll().Phidget_getDeviceClass 
__func.restype = ctypes.c_int32", "ctypes.WINFUNCTYPE(None, ctypes.c_void_p, ctypes.c_void_p) else: self._DetachFactory = ctypes.CFUNCTYPE(None, ctypes.c_void_p, ctypes.c_void_p) self._Detach", "= __func(self.handle, ctypes.byref(_ChannelClass)) if result > 0: raise PhidgetException(result) return", "if result > 0: raise PhidgetException(result) def getIsRemote(self): _IsRemote =", "None def _localErrorEvent(self, handle, userPtr, Code, Description): if self._Error ==", "return _LibraryVersionNumber.value.decode('utf-8') @staticmethod def resetLibrary(): __func = PhidgetSupport.getDll().Phidget_resetLibrary __func.restype =", "None: self._Detach = None self._onDetach = None else: self._Detach =", "__func = PhidgetSupport.getDll().Phidget_getIsHubPortDevice __func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_IsHubPortDevice))", "self._Error = None self._onError = None if sys.platform == 'win32':", "raise PhidgetException(result) return _ChannelSubclass.value def close(self): __func = PhidgetSupport.getDll().Phidget_close __func.restype", "= None if sys.platform == 'win32': self._DetachFactory = ctypes.WINFUNCTYPE(None, ctypes.c_void_p,", "0: raise PhidgetException(result) return _HubPortSupportsSetSpeed.value def getIsChannel(self): _IsChannel = ctypes.c_int()", "PhidgetSupport.getDll().Phidget_resetLibrary __func.restype = ctypes.c_int32 result = __func() if result >", "__func = PhidgetSupport.getDll().Phidget_setHubPort __func.restype = ctypes.c_int32 result = __func(self.handle, _HubPort)", "setIsHubPortDevice(self, IsHubPortDevice): _IsHubPortDevice = ctypes.c_int(IsHubPortDevice) __func = PhidgetSupport.getDll().Phidget_setIsHubPortDevice __func.restype =", "= ctypes.c_char_p() __func = PhidgetSupport.getDll().Phidget_getLibraryVersionNumber __func.restype = ctypes.c_int32 result =", "PhidgetSupport.getDll().Phidget_finalize __func.restype = ctypes.c_int32 result = __func(_flags) if result >", "ctypes.c_int32 result = __func(self.handle, ctypes.byref(_VINTDeviceSupportsSetSpeed)) if result > 0: raise", "__func.restype = ctypes.c_int32 result = __func(self.handle, ctypes.byref(_DeviceID)) if result >", "ctypes.c_int32 result = __func(self.handle, ctypes.byref(_deviceLabel)) if result > 0: raise", "= ctypes.WINFUNCTYPE(None, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_char_p) else: self._PropertyChangeFactory = ctypes.CFUNCTYPE(None, ctypes.c_void_p,", "def getDeviceClass(self): _DeviceClass = ctypes.c_int() __func = PhidgetSupport.getDll().Phidget_getDeviceClass __func.restype =", "ctypes.c_int32 result = __func(_flags) if result > 0: raise PhidgetException(result)", "PhidgetException(result) ANY_SERIAL_NUMBER = -1 ANY_HUB_PORT = -1 ANY_CHANNEL = -1", "result = __func(self.handle, _HubPort) if result > 0: raise PhidgetException(result)", "def _localErrorEvent(self, handle, userPtr, Code, Description): if self._Error == None:", "0: raise PhidgetException(result) return _IsRemote.value def setIsRemote(self, IsRemote): _IsRemote =", "result > 0: raise PhidgetException(result) return _HubPortCount.value def getHubPortSpeed(self): _HubPortSpeed" ]
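The wrapper above leans on two ctypes idioms: declaring each foreign function's restype before the call, and storing the wrapped callback (self._onAttach) on the instance so the function pointer handed to C stays valid. Below is a minimal, runnable sketch of the callback idiom on a typical Linux/macOS system, using libc's qsort rather than any Phidget API; every name in it (CMPFUNC, py_cmp, cmp_callback) is illustrative only and not part of Phidget22:

import ctypes
import ctypes.util

libc = ctypes.CDLL(ctypes.util.find_library("c"))

# Callback factory, analogous to self._AttachFactory: C return type first,
# then the argument types.
CMPFUNC = ctypes.CFUNCTYPE(ctypes.c_int,
                           ctypes.POINTER(ctypes.c_int),
                           ctypes.POINTER(ctypes.c_int))

def py_cmp(a, b):
    return a[0] - b[0]

# Keep a reference to the wrapped callback, exactly as the wrapper keeps
# self._onAttach; if it were garbage-collected, C would call freed memory.
cmp_callback = CMPFUNC(py_cmp)

values = (ctypes.c_int * 5)(5, 1, 7, 33, 99)
libc.qsort(values, len(values), ctypes.sizeof(ctypes.c_int), cmp_callback)
assert list(values) == [1, 5, 7, 33, 99]

On Windows the wrapper swaps CFUNCTYPE for WINFUNCTYPE because the Phidget DLL uses the stdcall convention there; the keep-alive concern is identical.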
[ "request_data = {\"data\": self.auction} entrypoint = '/auctions' response = self.app.post_json(entrypoint,", "'/auctions' response = self.app.post_json(entrypoint, request_data) self.assertEqual(response.status, expected_http_status) def create_auction_check_minNumberOfQualifiedBids(self): expected_minNumberOfQualifiedBids", "expected_auctionParameters = {'type': 'texas'} request_data = {\"data\": self.auction} entrypoint =", "self.assertEqual(response.status, expected_http_status) def create_auction_check_minNumberOfQualifiedBids(self): expected_minNumberOfQualifiedBids = 2 request_data = {\"data\":", "create_auction_check_auctionParameters(self): expected_auctionParameters = {'type': 'texas'} request_data = {\"data\": self.auction} entrypoint", "expected_http_status = '201 Created' request_data = {\"data\": self.auction} entrypoint =", "= 2 request_data = {\"data\": self.auction} entrypoint = '/auctions' response", "auction['auctionPeriod'] = {'startDate': None} response = self.app.post_json(entrypoint, request_data, status=422) self.assertEqual(response.status,", "entrypoint = '/auctions' response = self.app.post_json(entrypoint, request_data) self.assertEqual(response.json['data']['minNumberOfQualifiedBids'], expected_minNumberOfQualifiedBids) def", "response = self.app.post_json(entrypoint, request_data) self.assertEqual(response.status, expected_http_status) def create_auction_check_minNumberOfQualifiedBids(self): expected_minNumberOfQualifiedBids =", "= '/auctions' response = self.app.post_json(entrypoint, request_data) self.assertEqual(response.status, expected_http_status) def create_auction_check_minNumberOfQualifiedBids(self):", "= self.app.post_json(entrypoint, request_data) self.assertEqual(response.status, expected_http_status) def create_auction_check_minNumberOfQualifiedBids(self): expected_minNumberOfQualifiedBids = 2", "self.auction auction.pop('auctionPeriod') request_data = {\"data\": self.auction} entrypoint = '/auctions' response", "'422 Unprocessable Entity' auction = self.auction auction.pop('auctionPeriod') request_data = {\"data\":", "expected_auctionParameters) def create_auction_invalid_auctionPeriod(self): expected_http_status = '422 Unprocessable Entity' auction =", "Unprocessable Entity' auction = self.auction auction.pop('auctionPeriod') request_data = {\"data\": self.auction}", "entrypoint = '/auctions' response = self.app.post_json(entrypoint, request_data, status=422) self.assertEqual(response.status, expected_http_status)", "def create_auction_check_auctionParameters(self): expected_auctionParameters = {'type': 'texas'} request_data = {\"data\": self.auction}", "request_data) self.assertEqual(response.status, expected_http_status) def create_auction_check_minNumberOfQualifiedBids(self): expected_minNumberOfQualifiedBids = 2 request_data =", "status=422) self.assertEqual(response.status, expected_http_status) def create_auction_dump(self): request_data = {\"data\": self.auction} entrypoint", "expected_minNumberOfQualifiedBids) def create_auction_check_auctionParameters(self): expected_auctionParameters = {'type': 'texas'} request_data = {\"data\":", "2 request_data = {\"data\": self.auction} entrypoint = '/auctions' response =", "= self.auction auction.pop('auctionPeriod') request_data = {\"data\": self.auction} entrypoint = '/auctions'", "self.app.post_json(entrypoint, request_data, status=422) self.assertEqual(response.status, expected_http_status) entrypoint = '/auctions' auction['auctionPeriod'] =", "{\"data\": self.auction} entrypoint = 
'/auctions' response = self.app.post_json(entrypoint, request_data) self.assertEqual(response.json['data']['minNumberOfQualifiedBids'],", "request_data, status=422) self.assertEqual(response.status, expected_http_status) def create_auction_dump(self): request_data = {\"data\": self.auction}", "expected_minNumberOfQualifiedBids = 2 request_data = {\"data\": self.auction} entrypoint = '/auctions'", "create_auction_check_minNumberOfQualifiedBids(self): expected_minNumberOfQualifiedBids = 2 request_data = {\"data\": self.auction} entrypoint =", "response = self.app.post_json(entrypoint, request_data, status=422) self.assertEqual(response.status, expected_http_status) def create_auction_dump(self): request_data", "'/auctions' response = self.app.post_json(entrypoint, request_data) self.assertEqual(response.json['data']['minNumberOfQualifiedBids'], expected_minNumberOfQualifiedBids) def create_auction_check_auctionParameters(self): expected_auctionParameters", "'/auctions' auction['auctionPeriod'] = {'startDate': None} response = self.app.post_json(entrypoint, request_data, status=422)", "= {\"data\": self.auction} entrypoint = '/auctions' response = self.app.post_json(entrypoint, request_data)", "self.assertEqual(response.status, expected_http_status) def create_auction_dump(self): request_data = {\"data\": self.auction} entrypoint =", "response = self.app.post_json(entrypoint, request_data) filename = 'docs/source/tutorial/create_auction.http' self.dump(response.request, response, filename)", "self.auction} entrypoint = '/auctions' response = self.app.post_json(entrypoint, request_data) self.assertEqual(response.json['data']['minNumberOfQualifiedBids'], expected_minNumberOfQualifiedBids)", "'/auctions' response = self.app.post_json(entrypoint, request_data, status=422) self.assertEqual(response.status, expected_http_status) entrypoint =", "self.assertEqual(response.json['data']['auctionParameters'], expected_auctionParameters) def create_auction_invalid_auctionPeriod(self): expected_http_status = '422 Unprocessable Entity' auction", "auction = self.auction auction.pop('auctionPeriod') request_data = {\"data\": self.auction} entrypoint =", "def create_auction(self): expected_http_status = '201 Created' request_data = {\"data\": self.auction}", "= '422 Unprocessable Entity' auction = self.auction auction.pop('auctionPeriod') request_data =", "self.app.post_json(entrypoint, request_data) self.assertEqual(response.status, expected_http_status) def create_auction_check_minNumberOfQualifiedBids(self): expected_minNumberOfQualifiedBids = 2 request_data", "response = self.app.post_json(entrypoint, request_data) self.assertEqual(response.json['data']['minNumberOfQualifiedBids'], expected_minNumberOfQualifiedBids) def create_auction_check_auctionParameters(self): expected_auctionParameters =", "entrypoint = '/auctions' response = self.app.post_json(entrypoint, request_data) self.assertEqual(response.status, expected_http_status) def", "entrypoint = '/auctions' auction['auctionPeriod'] = {'startDate': None} response = self.app.post_json(entrypoint,", "= '/auctions' response = self.app.post_json(entrypoint, request_data, status=422) self.assertEqual(response.status, expected_http_status) entrypoint", "'201 Created' request_data = {\"data\": self.auction} entrypoint = '/auctions' response", "status=422) self.assertEqual(response.status, expected_http_status) entrypoint = '/auctions' auction['auctionPeriod'] = {'startDate': None}", "Created' request_data = {\"data\": self.auction} entrypoint = '/auctions' 
response =", "{\"data\": self.auction} entrypoint = '/auctions' response = self.app.post_json(entrypoint, request_data) self.assertEqual(response.status,", "'/auctions' response = self.app.post_json(entrypoint, request_data) self.assertEqual(response.json['data']['auctionParameters'], expected_auctionParameters) def create_auction_invalid_auctionPeriod(self): expected_http_status", "auction.pop('auctionPeriod') request_data = {\"data\": self.auction} entrypoint = '/auctions' response =", "expected_http_status) entrypoint = '/auctions' auction['auctionPeriod'] = {'startDate': None} response =", "def create_auction_check_minNumberOfQualifiedBids(self): expected_minNumberOfQualifiedBids = 2 request_data = {\"data\": self.auction} entrypoint", "request_data) self.assertEqual(response.json['data']['minNumberOfQualifiedBids'], expected_minNumberOfQualifiedBids) def create_auction_check_auctionParameters(self): expected_auctionParameters = {'type': 'texas'} request_data", "{'type': 'texas'} request_data = {\"data\": self.auction} entrypoint = '/auctions' response", "entrypoint = '/auctions' response = self.app.post_json(entrypoint, request_data) self.assertEqual(response.json['data']['auctionParameters'], expected_auctionParameters) def", "= '/auctions' response = self.app.post_json(entrypoint, request_data) self.assertEqual(response.json['data']['auctionParameters'], expected_auctionParameters) def create_auction_invalid_auctionPeriod(self):", "create_auction(self): expected_http_status = '201 Created' request_data = {\"data\": self.auction} entrypoint", "= self.app.post_json(entrypoint, request_data) self.assertEqual(response.json['data']['minNumberOfQualifiedBids'], expected_minNumberOfQualifiedBids) def create_auction_check_auctionParameters(self): expected_auctionParameters = {'type':", "expected_http_status) def create_auction_check_minNumberOfQualifiedBids(self): expected_minNumberOfQualifiedBids = 2 request_data = {\"data\": self.auction}", "= {'type': 'texas'} request_data = {\"data\": self.auction} entrypoint = '/auctions'", "= self.app.post_json(entrypoint, request_data, status=422) self.assertEqual(response.status, expected_http_status) def create_auction_dump(self): request_data =", "request_data, status=422) self.assertEqual(response.status, expected_http_status) entrypoint = '/auctions' auction['auctionPeriod'] = {'startDate':", "= '/auctions' response = self.app.post_json(entrypoint, request_data) filename = 'docs/source/tutorial/create_auction.http' self.dump(response.request,", "'/auctions' response = self.app.post_json(entrypoint, request_data) filename = 'docs/source/tutorial/create_auction.http' self.dump(response.request, response,", "{\"data\": self.auction} entrypoint = '/auctions' response = self.app.post_json(entrypoint, request_data) self.assertEqual(response.json['data']['auctionParameters'],", "self.app.post_json(entrypoint, request_data, status=422) self.assertEqual(response.status, expected_http_status) def create_auction_dump(self): request_data = {\"data\":", "'texas'} request_data = {\"data\": self.auction} entrypoint = '/auctions' response =", "= {\"data\": self.auction} entrypoint = '/auctions' response = self.app.post_json(entrypoint, request_data,", "expected_http_status = '422 Unprocessable Entity' auction = self.auction auction.pop('auctionPeriod') request_data", "None} response = self.app.post_json(entrypoint, request_data, status=422) self.assertEqual(response.status, expected_http_status) def create_auction_dump(self):", "entrypoint = '/auctions' 
response = self.app.post_json(entrypoint, request_data) filename = 'docs/source/tutorial/create_auction.http'", "self.auction} entrypoint = '/auctions' response = self.app.post_json(entrypoint, request_data) filename =", "= self.app.post_json(entrypoint, request_data) self.assertEqual(response.json['data']['auctionParameters'], expected_auctionParameters) def create_auction_invalid_auctionPeriod(self): expected_http_status = '422", "self.auction} entrypoint = '/auctions' response = self.app.post_json(entrypoint, request_data, status=422) self.assertEqual(response.status,", "def create_auction_invalid_auctionPeriod(self): expected_http_status = '422 Unprocessable Entity' auction = self.auction", "= self.app.post_json(entrypoint, request_data, status=422) self.assertEqual(response.status, expected_http_status) entrypoint = '/auctions' auction['auctionPeriod']", "Entity' auction = self.auction auction.pop('auctionPeriod') request_data = {\"data\": self.auction} entrypoint", "self.app.post_json(entrypoint, request_data) self.assertEqual(response.json['data']['minNumberOfQualifiedBids'], expected_minNumberOfQualifiedBids) def create_auction_check_auctionParameters(self): expected_auctionParameters = {'type': 'texas'}", "= '/auctions' response = self.app.post_json(entrypoint, request_data) self.assertEqual(response.json['data']['minNumberOfQualifiedBids'], expected_minNumberOfQualifiedBids) def create_auction_check_auctionParameters(self):", "{\"data\": self.auction} entrypoint = '/auctions' response = self.app.post_json(entrypoint, request_data) filename", "= '/auctions' auction['auctionPeriod'] = {'startDate': None} response = self.app.post_json(entrypoint, request_data,", "= '201 Created' request_data = {\"data\": self.auction} entrypoint = '/auctions'", "create_auction_dump(self): request_data = {\"data\": self.auction} entrypoint = '/auctions' response =", "{'startDate': None} response = self.app.post_json(entrypoint, request_data, status=422) self.assertEqual(response.status, expected_http_status) def", "def create_auction_dump(self): request_data = {\"data\": self.auction} entrypoint = '/auctions' response", "self.auction} entrypoint = '/auctions' response = self.app.post_json(entrypoint, request_data) self.assertEqual(response.status, expected_http_status)", "create_auction_invalid_auctionPeriod(self): expected_http_status = '422 Unprocessable Entity' auction = self.auction auction.pop('auctionPeriod')", "self.app.post_json(entrypoint, request_data) self.assertEqual(response.json['data']['auctionParameters'], expected_auctionParameters) def create_auction_invalid_auctionPeriod(self): expected_http_status = '422 Unprocessable", "expected_http_status) def create_auction_dump(self): request_data = {\"data\": self.auction} entrypoint = '/auctions'", "self.auction} entrypoint = '/auctions' response = self.app.post_json(entrypoint, request_data) self.assertEqual(response.json['data']['auctionParameters'], expected_auctionParameters)", "{\"data\": self.auction} entrypoint = '/auctions' response = self.app.post_json(entrypoint, request_data, status=422)", "= {'startDate': None} response = self.app.post_json(entrypoint, request_data, status=422) self.assertEqual(response.status, expected_http_status)", "self.assertEqual(response.json['data']['minNumberOfQualifiedBids'], expected_minNumberOfQualifiedBids) def create_auction_check_auctionParameters(self): expected_auctionParameters = {'type': 'texas'} request_data =", "self.assertEqual(response.status, expected_http_status) entrypoint = '/auctions' 
auction['auctionPeriod'] = {'startDate': None} response", "request_data) self.assertEqual(response.json['data']['auctionParameters'], expected_auctionParameters) def create_auction_invalid_auctionPeriod(self): expected_http_status = '422 Unprocessable Entity'", "response = self.app.post_json(entrypoint, request_data, status=422) self.assertEqual(response.status, expected_http_status) entrypoint = '/auctions'", "response = self.app.post_json(entrypoint, request_data) self.assertEqual(response.json['data']['auctionParameters'], expected_auctionParameters) def create_auction_invalid_auctionPeriod(self): expected_http_status =" ]
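These methods drive the API through WebTest's TestApp: post_json serializes the dict, sets the JSON content type, and by default raises on any non-2xx response, which is why the invalid-auctionPeriod case passes status=422 explicitly. A self-contained sketch of that behaviour, assuming only the webtest package; the one-route WSGI app and the '/auctions' path are stand-ins, not the real auction API:

from webtest import TestApp

def always_422(environ, start_response):
    # A stand-in WSGI app that rejects every request.
    start_response('422 Unprocessable Entity',
                   [('Content-Type', 'application/json')])
    return [b'{"status": "error"}']

app = TestApp(always_422)

# Without status=422 this call would raise webtest.AppError, because the
# response is not 2xx; passing the expected status turns it into a check.
response = app.post_json('/auctions', {'data': {}}, status=422)
assert response.status == '422 Unprocessable Entity'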
[ "pass @contextmanager def app_with_scout(app=None, config=None): \"\"\" Context manager that configures", "finally: scout_apm.celery.uninstall() # Reset Scout configuration. Config.reset_all() def test_hello_eager(tracked_requests): with", "1 tracked_request = tracked_requests[0] assert \"task_id\" in tracked_request.tags assert tracked_request.tags[\"is_eager\"]", "len(tracked_requests) == 1 tracked_request = tracked_requests[0] assert tracked_request.active_spans == []", "assert tracked_request.tags[\"exchange\"] == \"\" assert tracked_request.tags[\"routing_key\"] == \"celery\" assert tracked_request.tags[\"queue\"]", "< (4, 0), reason=\"pytest fixtures added in Celery 4.0\" )", "setting up # logging - and stop it from interfering", "= tracked_requests[0].tags[\"task_id\"] assert tracked_requests[1].tags[\"parent_task_id\"] == first_task_id def test_no_monitor(tracked_requests): # With", "app.tasks[\"tests.integration.test_celery.hello\"].apply() assert result.result == \"Hello World!\" assert len(tracked_requests) == 1", "\"Job/tests.integration.test_celery.hello\", \"Job/tests.integration.test_celery.hello\", ] assert \"parent_task_id\" not in tracked_requests[0].tags first_task_id =", "assert \"task_id\" in tracked_request.tags assert tracked_request.tags[\"is_eager\"] is True assert tracked_request.tags[\"exchange\"]", "up # logging - and stop it from interfering with", "assert tracked_request.tags[\"routing_key\"] == \"celery\" assert tracked_request.tags[\"queue\"] == \"unknown\" assert (", ") assert result == \"Hello World!\" assert len(tracked_requests) == 1", "from interfering with global state # http://docs.celeryproject.org/en/v4.3.0/userguide/signals.html#setup-logging pass @contextmanager def", "< 60.0 ) # Assume test took <60 seconds assert", "Enable Scout by default in tests. if config is None:", "according to https://docs.scoutapm.com/#celery Config.set(**config) scout_apm.celery.install() try: yield app finally: scout_apm.celery.uninstall()", "\"queue_time\" not in span.tags @skip_unless_celery_4_plus def test_hello_worker_chain(celery_app, celery_worker, tracked_requests): with", "Setup according to https://docs.scoutapm.com/#celery Config.set(**config) scout_apm.celery.install() try: yield app finally:", "tracked_request.tags[\"is_eager\"] is True assert tracked_request.tags[\"exchange\"] == \"unknown\" assert tracked_request.tags[\"routing_key\"] ==", "assert tracked_requests[1].tags[\"parent_task_id\"] == first_task_id def test_no_monitor(tracked_requests): # With an empty", "= celery.Celery(\"tasks\", broker=\"memory://\") # Enable Scout by default in tests.", "assert \"parent_task_id\" not in tracked_requests[0].tags first_task_id = tracked_requests[0].tags[\"task_id\"] assert tracked_requests[1].tags[\"parent_task_id\"]", "# Reset Scout configuration. 
Config.reset_all() def test_hello_eager(tracked_requests): with app_with_scout() as", "tracked_request.tags[\"routing_key\"] == \"unknown\" assert tracked_request.tags[\"queue\"] == \"unknown\" assert tracked_request.active_spans ==", "\"unknown\" assert tracked_request.tags[\"queue\"] == \"unknown\" assert tracked_request.active_spans == [] assert", "\"task_id\" in tracked_request.tags assert tracked_request.tags[\"is_eager\"] is True assert tracked_request.tags[\"exchange\"] ==", ") # Assume test took <60 seconds assert tracked_request.active_spans ==", "app: result = app.tasks[\"tests.integration.test_celery.hello\"].delay().get() assert result == \"Hello World!\" assert", "with app_with_scout(app=celery_app) as app: result = app.tasks[\"tests.integration.test_celery.hello\"].delay().get() assert result ==", "result = app.tasks[\"tests.integration.test_celery.hello\"].apply() assert result.result == \"Hello World!\" assert tracked_requests", "app.tasks[\"tests.integration.test_celery.hello\"].apply() assert result.result == \"Hello World!\" assert tracked_requests == []", "from scout_apm.api import Config # http://docs.celeryproject.org/en/latest/userguide/testing.html#py-test skip_unless_celery_4_plus = pytest.mark.skipif( celery.VERSION", "app: hello = app.tasks[\"tests.integration.test_celery.hello\"] result = (hello.si() | hello.si()).apply_async().get() assert", "(4, 0), reason=\"pytest fixtures added in Celery 4.0\" ) @setup_logging.connect", "assert tracked_request.tags[\"routing_key\"] == \"unknown\" assert tracked_request.tags[\"queue\"] == \"unknown\" assert tracked_request.active_spans", "app_with_scout() as app: result = app.tasks[\"tests.integration.test_celery.hello\"].apply() assert result.result == \"Hello", "= pytest.mark.skipif( celery.VERSION < (4, 0), reason=\"pytest fixtures added in", "@app.task def hello(): return \"Hello World!\" # Setup according to", "With an empty config, \"monitor\" defaults to False. with app_with_scout(config={})", "test_hello_worker_chain(celery_app, celery_worker, tracked_requests): with app_with_scout(app=celery_app) as app: hello = app.tasks[\"tests.integration.test_celery.hello\"]", "from setting up # logging - and stop it from", "with Scout installed. 
\"\"\" if app is None: app =", "app.tasks[\"tests.integration.test_celery.hello\"].delay().get() assert result == \"Hello World!\" assert len(tracked_requests) == 1", "span.tags @skip_unless_celery_4_plus def test_hello_worker_chain(celery_app, celery_worker, tracked_requests): with app_with_scout(app=celery_app) as app:", "( app.tasks[\"tests.integration.test_celery.hello\"] .apply_async(headers={\"scout_task_start\": \"an evil string\"}) .get() ) assert result", "is None: app = celery.Celery(\"tasks\", broker=\"memory://\") # Enable Scout by", "app_with_scout(app=celery_app) as app: result = ( app.tasks[\"tests.integration.test_celery.hello\"] .apply_async(headers={\"scout_task_start\": \"an evil", "result = app.tasks[\"tests.integration.test_celery.hello\"].delay().get() assert result == \"Hello World!\" assert len(tracked_requests)", "World!\" assert len(tracked_requests) == 1 tracked_request = tracked_requests[0] assert tracked_request.active_spans", "len(tracked_request.complete_spans) == 1 span = tracked_request.complete_spans[0] assert span.operation == \"Job/tests.integration.test_celery.hello\"", "string\"}) .get() ) assert result == \"Hello World!\" assert len(tracked_requests)", "absolute_import, division, print_function, unicode_literals from contextlib import contextmanager import celery", "\"Job/tests.integration.test_celery.hello\", ] assert \"parent_task_id\" not in tracked_requests[0].tags first_task_id = tracked_requests[0].tags[\"task_id\"]", "[] assert len(tracked_request.complete_spans) == 1 span = tracked_request.complete_spans[0] assert span.operation", "- and stop it from interfering with global state #", ") @setup_logging.connect def do_nothing(**kwargs): # Just by connecting to this", "\"\" assert tracked_request.tags[\"routing_key\"] == \"celery\" assert tracked_request.tags[\"queue\"] == \"unknown\" assert", "with app_with_scout(config={}) as app: result = app.tasks[\"tests.integration.test_celery.hello\"].apply() assert result.result ==", "False assert tracked_request.tags[\"exchange\"] == \"\" assert tracked_request.tags[\"routing_key\"] == \"celery\" assert", "span.operation == \"Job/tests.integration.test_celery.hello\" @skip_unless_celery_4_plus def test_hello_worker_header_preset(celery_app, celery_worker, tracked_requests): with app_with_scout(app=celery_app)", "an empty config, \"monitor\" defaults to False. with app_with_scout(config={}) as", "celery.Celery(\"tasks\", broker=\"memory://\") # Enable Scout by default in tests. if", "hello.si()).apply_async().get() assert result == \"Hello World!\" assert len(tracked_requests) == 2", "tracked_requests] == [ \"Job/tests.integration.test_celery.hello\", \"Job/tests.integration.test_celery.hello\", ] assert \"parent_task_id\" not in", "== 1 tracked_request = tracked_requests[0] assert \"task_id\" in tracked_request.tags assert", "import celery import pytest from celery.signals import setup_logging import scout_apm.celery", "first_task_id = tracked_requests[0].tags[\"task_id\"] assert tracked_requests[1].tags[\"parent_task_id\"] == first_task_id def test_no_monitor(tracked_requests): #", "signal, we prevent Celery from setting up # logging -", "= app.tasks[\"tests.integration.test_celery.hello\"].apply() assert result.result == \"Hello World!\" assert len(tracked_requests) ==", "connecting to this signal, we prevent Celery from setting up", "assert result == \"Hello World!\" assert len(tracked_requests) == 1 tracked_request", "a Celery app with Scout installed. 
\"\"\" if app is", "by connecting to this signal, we prevent Celery from setting", "def test_hello_worker_header_preset(celery_app, celery_worker, tracked_requests): with app_with_scout(app=celery_app) as app: result =", "== [] assert len(tracked_request.complete_spans) == 1 span = tracked_request.complete_spans[0] assert", "config=None): \"\"\" Context manager that configures a Celery app with", "print_function, unicode_literals from contextlib import contextmanager import celery import pytest", "Scout by default in tests. if config is None: config", "scout_apm.celery.install() try: yield app finally: scout_apm.celery.uninstall() # Reset Scout configuration.", "\"monitor\" defaults to False. with app_with_scout(config={}) as app: result =", "with global state # http://docs.celeryproject.org/en/v4.3.0/userguide/signals.html#setup-logging pass @contextmanager def app_with_scout(app=None, config=None):", "running the agent. config[\"core_agent_launch\"] = False @app.task def hello(): return", "== first_task_id def test_no_monitor(tracked_requests): # With an empty config, \"monitor\"", "tracked_requests): with app_with_scout(app=celery_app) as app: result = app.tasks[\"tests.integration.test_celery.hello\"].delay().get() assert result", "= (hello.si() | hello.si()).apply_async().get() assert result == \"Hello World!\" assert", "\"unknown\" assert tracked_request.tags[\"routing_key\"] == \"unknown\" assert tracked_request.tags[\"queue\"] == \"unknown\" assert", "\"Job/tests.integration.test_celery.hello\" @skip_unless_celery_4_plus def test_hello_worker_header_preset(celery_app, celery_worker, tracked_requests): with app_with_scout(app=celery_app) as app:", "assert tracked_request.tags[\"is_eager\"] is False assert tracked_request.tags[\"exchange\"] == \"\" assert tracked_request.tags[\"routing_key\"]", "default in tests. if config is None: config = {\"monitor\":", "scout_apm.celery.uninstall() # Reset Scout configuration. Config.reset_all() def test_hello_eager(tracked_requests): with app_with_scout()", "config[\"core_agent_launch\"] = False @app.task def hello(): return \"Hello World!\" #", "tracked_request.active_spans == [] assert len(tracked_request.complete_spans) == 1 span = tracked_request.complete_spans[0]", "if config is None: config = {\"monitor\": True} # Disable", "assert tracked_request.tags[\"queue\"] == \"unknown\" assert tracked_request.active_spans == [] assert len(tracked_request.complete_spans)", "http://docs.celeryproject.org/en/v4.3.0/userguide/signals.html#setup-logging pass @contextmanager def app_with_scout(app=None, config=None): \"\"\" Context manager that", "= {\"monitor\": True} # Disable running the agent. 
config[\"core_agent_launch\"] =", "app = celery.Celery(\"tasks\", broker=\"memory://\") # Enable Scout by default in", "from __future__ import absolute_import, division, print_function, unicode_literals from contextlib import", "Config # http://docs.celeryproject.org/en/latest/userguide/testing.html#py-test skip_unless_celery_4_plus = pytest.mark.skipif( celery.VERSION < (4, 0),", "in tracked_request.tags assert tracked_request.tags[\"is_eager\"] is False assert tracked_request.tags[\"exchange\"] == \"\"", "\"unknown\" assert ( 0.0 <= tracked_request.tags[\"queue_time\"] < 60.0 ) #", "we prevent Celery from setting up # logging - and", "tracked_requests): with app_with_scout(app=celery_app) as app: result = ( app.tasks[\"tests.integration.test_celery.hello\"] .apply_async(headers={\"scout_task_start\":", "def test_no_monitor(tracked_requests): # With an empty config, \"monitor\" defaults to", "contextlib import contextmanager import celery import pytest from celery.signals import", "def do_nothing(**kwargs): # Just by connecting to this signal, we", "= False @app.task def hello(): return \"Hello World!\" # Setup", "as app: hello = app.tasks[\"tests.integration.test_celery.hello\"] result = (hello.si() | hello.si()).apply_async().get()", "setup_logging import scout_apm.celery from scout_apm.api import Config # http://docs.celeryproject.org/en/latest/userguide/testing.html#py-test skip_unless_celery_4_plus", "assert tracked_request.tags[\"exchange\"] == \"unknown\" assert tracked_request.tags[\"routing_key\"] == \"unknown\" assert tracked_request.tags[\"queue\"]", "| hello.si()).apply_async().get() assert result == \"Hello World!\" assert len(tracked_requests) ==", "division, print_function, unicode_literals from contextlib import contextmanager import celery import", "tracked_request.tags[\"queue\"] == \"unknown\" assert tracked_request.active_spans == [] assert len(tracked_request.complete_spans) ==", "app_with_scout(app=celery_app) as app: result = app.tasks[\"tests.integration.test_celery.hello\"].delay().get() assert result == \"Hello", "Config.set(**config) scout_apm.celery.install() try: yield app finally: scout_apm.celery.uninstall() # Reset Scout", "app finally: scout_apm.celery.uninstall() # Reset Scout configuration. Config.reset_all() def test_hello_eager(tracked_requests):", "@skip_unless_celery_4_plus def test_hello_worker_header_preset(celery_app, celery_worker, tracked_requests): with app_with_scout(app=celery_app) as app: result", "import scout_apm.celery from scout_apm.api import Config # http://docs.celeryproject.org/en/latest/userguide/testing.html#py-test skip_unless_celery_4_plus =", "assert \"queue_time\" not in span.tags @skip_unless_celery_4_plus def test_hello_worker_chain(celery_app, celery_worker, tracked_requests):", "4.0\" ) @setup_logging.connect def do_nothing(**kwargs): # Just by connecting to", "# Assume test took <60 seconds assert tracked_request.active_spans == []", "assert span.operation == \"Job/tests.integration.test_celery.hello\" assert \"queue_time\" not in span.tags @skip_unless_celery_4_plus", "= app.tasks[\"tests.integration.test_celery.hello\"].delay().get() assert result == \"Hello World!\" assert len(tracked_requests) ==", "# Setup according to https://docs.scoutapm.com/#celery Config.set(**config) scout_apm.celery.install() try: yield app", "agent. 
config[\"core_agent_launch\"] = False @app.task def hello(): return \"Hello World!\"", "== \"Job/tests.integration.test_celery.hello\" @skip_unless_celery_4_plus def test_hello_worker(celery_app, celery_worker, tracked_requests): with app_with_scout(app=celery_app) as", "in tracked_request.tags assert tracked_request.tags[\"is_eager\"] is True assert tracked_request.tags[\"exchange\"] == \"unknown\"", "tracked_request.tags[\"queue\"] == \"unknown\" assert ( 0.0 <= tracked_request.tags[\"queue_time\"] < 60.0", "pytest from celery.signals import setup_logging import scout_apm.celery from scout_apm.api import", "if app is None: app = celery.Celery(\"tasks\", broker=\"memory://\") # Enable", "__future__ import absolute_import, division, print_function, unicode_literals from contextlib import contextmanager", "\"an evil string\"}) .get() ) assert result == \"Hello World!\"", "to https://docs.scoutapm.com/#celery Config.set(**config) scout_apm.celery.install() try: yield app finally: scout_apm.celery.uninstall() #", "tracked_request.complete_spans[0] assert span.operation == \"Job/tests.integration.test_celery.hello\" @skip_unless_celery_4_plus def test_hello_worker(celery_app, celery_worker, tracked_requests):", "the agent. config[\"core_agent_launch\"] = False @app.task def hello(): return \"Hello", "is False assert tracked_request.tags[\"exchange\"] == \"\" assert tracked_request.tags[\"routing_key\"] == \"celery\"", "1 tracked_request = tracked_requests[0] assert tracked_request.active_spans == [] assert len(tracked_request.complete_spans)", "(hello.si() | hello.si()).apply_async().get() assert result == \"Hello World!\" assert len(tracked_requests)", "empty config, \"monitor\" defaults to False. with app_with_scout(config={}) as app:", "to this signal, we prevent Celery from setting up #", "https://docs.scoutapm.com/#celery Config.set(**config) scout_apm.celery.install() try: yield app finally: scout_apm.celery.uninstall() # Reset", "fixtures added in Celery 4.0\" ) @setup_logging.connect def do_nothing(**kwargs): #", "seconds assert tracked_request.active_spans == [] assert len(tracked_request.complete_spans) == 1 span", "\"parent_task_id\" not in tracked_requests[0].tags first_task_id = tracked_requests[0].tags[\"task_id\"] assert tracked_requests[1].tags[\"parent_task_id\"] ==", "\"unknown\" assert tracked_request.active_spans == [] assert len(tracked_request.complete_spans) == 1 span", "tracked_requests[0] assert tracked_request.active_spans == [] assert len(tracked_request.complete_spans) == 1 span", "http://docs.celeryproject.org/en/latest/userguide/testing.html#py-test skip_unless_celery_4_plus = pytest.mark.skipif( celery.VERSION < (4, 0), reason=\"pytest fixtures", "= tracked_requests[0] assert \"task_id\" in tracked_request.tags assert tracked_request.tags[\"is_eager\"] is True", "def app_with_scout(app=None, config=None): \"\"\" Context manager that configures a Celery", "result = ( app.tasks[\"tests.integration.test_celery.hello\"] .apply_async(headers={\"scout_task_start\": \"an evil string\"}) .get() )", "import pytest from celery.signals import setup_logging import scout_apm.celery from scout_apm.api", "try: yield app finally: scout_apm.celery.uninstall() # Reset Scout configuration. 
Config.reset_all()", "assert len(tracked_requests) == 1 tracked_request = tracked_requests[0] assert \"task_id\" in", "<60 seconds assert tracked_request.active_spans == [] assert len(tracked_request.complete_spans) == 1", ".get() ) assert result == \"Hello World!\" assert len(tracked_requests) ==", "do_nothing(**kwargs): # Just by connecting to this signal, we prevent", "== \"Hello World!\" assert len(tracked_requests) == 1 tracked_request = tracked_requests[0]", "# http://docs.celeryproject.org/en/v4.3.0/userguide/signals.html#setup-logging pass @contextmanager def app_with_scout(app=None, config=None): \"\"\" Context manager", "tracked_request.tags assert tracked_request.tags[\"is_eager\"] is False assert tracked_request.tags[\"exchange\"] == \"\" assert", "True} # Disable running the agent. config[\"core_agent_launch\"] = False @app.task", "contextmanager import celery import pytest from celery.signals import setup_logging import", "as app: result = ( app.tasks[\"tests.integration.test_celery.hello\"] .apply_async(headers={\"scout_task_start\": \"an evil string\"})", "app with Scout installed. \"\"\" if app is None: app", "1 span = tracked_request.complete_spans[0] assert span.operation == \"Job/tests.integration.test_celery.hello\" @skip_unless_celery_4_plus def", "celery_worker, tracked_requests): with app_with_scout(app=celery_app) as app: result = app.tasks[\"tests.integration.test_celery.hello\"].delay().get() assert", "= tracked_requests[0] assert \"task_id\" in tracked_request.tags assert tracked_request.tags[\"is_eager\"] is False", "assert \"task_id\" in tracked_request.tags assert tracked_request.tags[\"is_eager\"] is False assert tracked_request.tags[\"exchange\"]", "2 assert [t.complete_spans[0].operation for t in tracked_requests] == [ \"Job/tests.integration.test_celery.hello\",", "== \"unknown\" assert tracked_request.active_spans == [] assert len(tracked_request.complete_spans) == 1", "assert len(tracked_requests) == 1 tracked_request = tracked_requests[0] assert tracked_request.active_spans ==", "assert [t.complete_spans[0].operation for t in tracked_requests] == [ \"Job/tests.integration.test_celery.hello\", \"Job/tests.integration.test_celery.hello\",", "not in span.tags @skip_unless_celery_4_plus def test_hello_worker_chain(celery_app, celery_worker, tracked_requests): with app_with_scout(app=celery_app)", "None: app = celery.Celery(\"tasks\", broker=\"memory://\") # Enable Scout by default", "app.tasks[\"tests.integration.test_celery.hello\"] .apply_async(headers={\"scout_task_start\": \"an evil string\"}) .get() ) assert result ==", "assert len(tracked_requests) == 2 assert [t.complete_spans[0].operation for t in tracked_requests]", "tracked_request.tags assert tracked_request.tags[\"is_eager\"] is True assert tracked_request.tags[\"exchange\"] == \"unknown\" assert", "tracked_request.tags[\"routing_key\"] == \"celery\" assert tracked_request.tags[\"queue\"] == \"unknown\" assert ( 0.0", "] assert \"parent_task_id\" not in tracked_requests[0].tags first_task_id = tracked_requests[0].tags[\"task_id\"] assert", "False. with app_with_scout(config={}) as app: result = app.tasks[\"tests.integration.test_celery.hello\"].apply() assert result.result", "\"\"\" Context manager that configures a Celery app with Scout", "== 1 span = tracked_request.complete_spans[0] assert span.operation == \"Job/tests.integration.test_celery.hello\" assert", "tracked_requests[0] assert \"task_id\" in tracked_request.tags assert tracked_request.tags[\"is_eager\"] is False assert", "to False. 
with app_with_scout(config={}) as app: result = app.tasks[\"tests.integration.test_celery.hello\"].apply() assert", "assert span.operation == \"Job/tests.integration.test_celery.hello\" @skip_unless_celery_4_plus def test_hello_worker(celery_app, celery_worker, tracked_requests): with", "in tracked_requests[0].tags first_task_id = tracked_requests[0].tags[\"task_id\"] assert tracked_requests[1].tags[\"parent_task_id\"] == first_task_id def", "Reset Scout configuration. Config.reset_all() def test_hello_eager(tracked_requests): with app_with_scout() as app:", "# Enable Scout by default in tests. if config is", "app_with_scout(config={}) as app: result = app.tasks[\"tests.integration.test_celery.hello\"].apply() assert result.result == \"Hello", "first_task_id def test_no_monitor(tracked_requests): # With an empty config, \"monitor\" defaults", "prevent Celery from setting up # logging - and stop", "60.0 ) # Assume test took <60 seconds assert tracked_request.active_spans", "{\"monitor\": True} # Disable running the agent. config[\"core_agent_launch\"] = False", "result = (hello.si() | hello.si()).apply_async().get() assert result == \"Hello World!\"", "Celery from setting up # logging - and stop it", "[ \"Job/tests.integration.test_celery.hello\", \"Job/tests.integration.test_celery.hello\", ] assert \"parent_task_id\" not in tracked_requests[0].tags first_task_id", "tracked_request.tags[\"exchange\"] == \"unknown\" assert tracked_request.tags[\"routing_key\"] == \"unknown\" assert tracked_request.tags[\"queue\"] ==", "added in Celery 4.0\" ) @setup_logging.connect def do_nothing(**kwargs): # Just", "coding=utf-8 from __future__ import absolute_import, division, print_function, unicode_literals from contextlib", "it from interfering with global state # http://docs.celeryproject.org/en/v4.3.0/userguide/signals.html#setup-logging pass @contextmanager", "\"Hello World!\" # Setup according to https://docs.scoutapm.com/#celery Config.set(**config) scout_apm.celery.install() try:", "config is None: config = {\"monitor\": True} # Disable running", "assert len(tracked_request.complete_spans) == 1 span = tracked_request.complete_spans[0] assert span.operation ==", "None: config = {\"monitor\": True} # Disable running the agent.", "config, \"monitor\" defaults to False. with app_with_scout(config={}) as app: result", "== [ \"Job/tests.integration.test_celery.hello\", \"Job/tests.integration.test_celery.hello\", ] assert \"parent_task_id\" not in tracked_requests[0].tags", "assert result == \"Hello World!\" assert len(tracked_requests) == 2 assert", "World!\" assert len(tracked_requests) == 2 assert [t.complete_spans[0].operation for t in", "this signal, we prevent Celery from setting up # logging", "Config.reset_all() def test_hello_eager(tracked_requests): with app_with_scout() as app: result = app.tasks[\"tests.integration.test_celery.hello\"].apply()", "len(tracked_requests) == 2 assert [t.complete_spans[0].operation for t in tracked_requests] ==", "\"celery\" assert tracked_request.tags[\"queue\"] == \"unknown\" assert ( 0.0 <= tracked_request.tags[\"queue_time\"]", "tracked_requests[0].tags[\"task_id\"] assert tracked_requests[1].tags[\"parent_task_id\"] == first_task_id def test_no_monitor(tracked_requests): # With an", "Scout installed. 
\"\"\" if app is None: app = celery.Celery(\"tasks\",", "result.result == \"Hello World!\" assert len(tracked_requests) == 1 tracked_request =", "== \"celery\" assert tracked_request.tags[\"queue\"] == \"unknown\" assert ( 0.0 <=", "unicode_literals from contextlib import contextmanager import celery import pytest from", "\"Job/tests.integration.test_celery.hello\" @skip_unless_celery_4_plus def test_hello_worker(celery_app, celery_worker, tracked_requests): with app_with_scout(app=celery_app) as app:", "in Celery 4.0\" ) @setup_logging.connect def do_nothing(**kwargs): # Just by", "logging - and stop it from interfering with global state", "== \"Job/tests.integration.test_celery.hello\" @skip_unless_celery_4_plus def test_hello_worker_header_preset(celery_app, celery_worker, tracked_requests): with app_with_scout(app=celery_app) as", "app is None: app = celery.Celery(\"tasks\", broker=\"memory://\") # Enable Scout", "False @app.task def hello(): return \"Hello World!\" # Setup according", "interfering with global state # http://docs.celeryproject.org/en/v4.3.0/userguide/signals.html#setup-logging pass @contextmanager def app_with_scout(app=None,", "tracked_request.tags[\"exchange\"] == \"\" assert tracked_request.tags[\"routing_key\"] == \"celery\" assert tracked_request.tags[\"queue\"] ==", "tracked_requests[0] assert \"task_id\" in tracked_request.tags assert tracked_request.tags[\"is_eager\"] is True assert", "len(tracked_requests) == 1 tracked_request = tracked_requests[0] assert \"task_id\" in tracked_request.tags", "# With an empty config, \"monitor\" defaults to False. with", "evil string\"}) .get() ) assert result == \"Hello World!\" assert", "tracked_request.tags[\"queue_time\"] < 60.0 ) # Assume test took <60 seconds", "span = tracked_request.complete_spans[0] assert span.operation == \"Job/tests.integration.test_celery.hello\" @skip_unless_celery_4_plus def test_hello_worker_header_preset(celery_app,", "def test_hello_worker(celery_app, celery_worker, tracked_requests): with app_with_scout(app=celery_app) as app: result =", "defaults to False. 
with app_with_scout(config={}) as app: result = app.tasks[\"tests.integration.test_celery.hello\"].apply()", "== \"Job/tests.integration.test_celery.hello\" assert \"queue_time\" not in span.tags @skip_unless_celery_4_plus def test_hello_worker_chain(celery_app,", "scout_apm.celery from scout_apm.api import Config # http://docs.celeryproject.org/en/latest/userguide/testing.html#py-test skip_unless_celery_4_plus = pytest.mark.skipif(", "== 2 assert [t.complete_spans[0].operation for t in tracked_requests] == [", "@skip_unless_celery_4_plus def test_hello_worker_chain(celery_app, celery_worker, tracked_requests): with app_with_scout(app=celery_app) as app: hello", "True assert tracked_request.tags[\"exchange\"] == \"unknown\" assert tracked_request.tags[\"routing_key\"] == \"unknown\" assert", "result == \"Hello World!\" assert len(tracked_requests) == 2 assert [t.complete_spans[0].operation", "assert tracked_request.tags[\"is_eager\"] is True assert tracked_request.tags[\"exchange\"] == \"unknown\" assert tracked_request.tags[\"routing_key\"]", "from contextlib import contextmanager import celery import pytest from celery.signals", "t in tracked_requests] == [ \"Job/tests.integration.test_celery.hello\", \"Job/tests.integration.test_celery.hello\", ] assert \"parent_task_id\"", "from celery.signals import setup_logging import scout_apm.celery from scout_apm.api import Config", "pytest.mark.skipif( celery.VERSION < (4, 0), reason=\"pytest fixtures added in Celery", "= tracked_request.complete_spans[0] assert span.operation == \"Job/tests.integration.test_celery.hello\" @skip_unless_celery_4_plus def test_hello_worker_header_preset(celery_app, celery_worker,", "with app_with_scout(app=celery_app) as app: hello = app.tasks[\"tests.integration.test_celery.hello\"] result = (hello.si()", "result == \"Hello World!\" assert len(tracked_requests) == 1 tracked_request =", "== \"\" assert tracked_request.tags[\"routing_key\"] == \"celery\" assert tracked_request.tags[\"queue\"] == \"unknown\"", "assert tracked_request.active_spans == [] assert len(tracked_request.complete_spans) == 1 span =", "as app: result = app.tasks[\"tests.integration.test_celery.hello\"].apply() assert result.result == \"Hello World!\"", "def hello(): return \"Hello World!\" # Setup according to https://docs.scoutapm.com/#celery", "= tracked_request.complete_spans[0] assert span.operation == \"Job/tests.integration.test_celery.hello\" assert \"queue_time\" not in", "assert span.operation == \"Job/tests.integration.test_celery.hello\" @skip_unless_celery_4_plus def test_hello_worker_header_preset(celery_app, celery_worker, tracked_requests): with", "# Just by connecting to this signal, we prevent Celery", "= app.tasks[\"tests.integration.test_celery.hello\"] result = (hello.si() | hello.si()).apply_async().get() assert result ==", "span = tracked_request.complete_spans[0] assert span.operation == \"Job/tests.integration.test_celery.hello\" assert \"queue_time\" not", "World!\" assert len(tracked_requests) == 1 tracked_request = tracked_requests[0] assert \"task_id\"", "in tests. if config is None: config = {\"monitor\": True}", "installed. \"\"\" if app is None: app = celery.Celery(\"tasks\", broker=\"memory://\")", "( 0.0 <= tracked_request.tags[\"queue_time\"] < 60.0 ) # Assume test", "assert ( 0.0 <= tracked_request.tags[\"queue_time\"] < 60.0 ) # Assume", "Context manager that configures a Celery app with Scout installed.", "configuration. 
Config.reset_all() def test_hello_eager(tracked_requests): with app_with_scout() as app: result =", "return \"Hello World!\" # Setup according to https://docs.scoutapm.com/#celery Config.set(**config) scout_apm.celery.install()", "tracked_request.complete_spans[0] assert span.operation == \"Job/tests.integration.test_celery.hello\" @skip_unless_celery_4_plus def test_hello_worker_header_preset(celery_app, celery_worker, tracked_requests):", "tracked_requests): with app_with_scout(app=celery_app) as app: hello = app.tasks[\"tests.integration.test_celery.hello\"] result =", "result = app.tasks[\"tests.integration.test_celery.hello\"].apply() assert result.result == \"Hello World!\" assert len(tracked_requests)", "import absolute_import, division, print_function, unicode_literals from contextlib import contextmanager import", "celery_worker, tracked_requests): with app_with_scout(app=celery_app) as app: hello = app.tasks[\"tests.integration.test_celery.hello\"] result", "skip_unless_celery_4_plus = pytest.mark.skipif( celery.VERSION < (4, 0), reason=\"pytest fixtures added", "<= tracked_request.tags[\"queue_time\"] < 60.0 ) # Assume test took <60", "import Config # http://docs.celeryproject.org/en/latest/userguide/testing.html#py-test skip_unless_celery_4_plus = pytest.mark.skipif( celery.VERSION < (4,", ".apply_async(headers={\"scout_task_start\": \"an evil string\"}) .get() ) assert result == \"Hello", "span.operation == \"Job/tests.integration.test_celery.hello\" assert \"queue_time\" not in span.tags @skip_unless_celery_4_plus def", "with app_with_scout(app=celery_app) as app: result = ( app.tasks[\"tests.integration.test_celery.hello\"] .apply_async(headers={\"scout_task_start\": \"an", "@contextmanager def app_with_scout(app=None, config=None): \"\"\" Context manager that configures a", "def test_hello_worker_chain(celery_app, celery_worker, tracked_requests): with app_with_scout(app=celery_app) as app: hello =", "\"Hello World!\" assert len(tracked_requests) == 2 assert [t.complete_spans[0].operation for t", "span = tracked_request.complete_spans[0] assert span.operation == \"Job/tests.integration.test_celery.hello\" @skip_unless_celery_4_plus def test_hello_worker(celery_app,", "import setup_logging import scout_apm.celery from scout_apm.api import Config # http://docs.celeryproject.org/en/latest/userguide/testing.html#py-test", "\"\"\" if app is None: app = celery.Celery(\"tasks\", broker=\"memory://\") #", "[t.complete_spans[0].operation for t in tracked_requests] == [ \"Job/tests.integration.test_celery.hello\", \"Job/tests.integration.test_celery.hello\", ]", "= tracked_requests[0] assert tracked_request.active_spans == [] assert len(tracked_request.complete_spans) == 1", "in span.tags @skip_unless_celery_4_plus def test_hello_worker_chain(celery_app, celery_worker, tracked_requests): with app_with_scout(app=celery_app) as", "is True assert tracked_request.tags[\"exchange\"] == \"unknown\" assert tracked_request.tags[\"routing_key\"] == \"unknown\"", "assert tracked_request.tags[\"queue\"] == \"unknown\" assert ( 0.0 <= tracked_request.tags[\"queue_time\"] <", "\"Hello World!\" assert len(tracked_requests) == 1 tracked_request = tracked_requests[0] assert", "not in tracked_requests[0].tags first_task_id = tracked_requests[0].tags[\"task_id\"] assert tracked_requests[1].tags[\"parent_task_id\"] == first_task_id", "\"task_id\" in tracked_request.tags assert tracked_request.tags[\"is_eager\"] is False assert tracked_request.tags[\"exchange\"] ==", "@setup_logging.connect def 
do_nothing(**kwargs): # Just by connecting to this signal,", "World!\" # Setup according to https://docs.scoutapm.com/#celery Config.set(**config) scout_apm.celery.install() try: yield", "tracked_requests[1].tags[\"parent_task_id\"] == first_task_id def test_no_monitor(tracked_requests): # With an empty config,", "tracked_request.complete_spans[0] assert span.operation == \"Job/tests.integration.test_celery.hello\" assert \"queue_time\" not in span.tags", "that configures a Celery app with Scout installed. \"\"\" if", "config = {\"monitor\": True} # Disable running the agent. config[\"core_agent_launch\"]", "Assume test took <60 seconds assert tracked_request.active_spans == [] assert", "app_with_scout(app=celery_app) as app: hello = app.tasks[\"tests.integration.test_celery.hello\"] result = (hello.si() |", "stop it from interfering with global state # http://docs.celeryproject.org/en/v4.3.0/userguide/signals.html#setup-logging pass", "# logging - and stop it from interfering with global", "Disable running the agent. config[\"core_agent_launch\"] = False @app.task def hello():", "and stop it from interfering with global state # http://docs.celeryproject.org/en/v4.3.0/userguide/signals.html#setup-logging", "tracked_request.tags[\"is_eager\"] is False assert tracked_request.tags[\"exchange\"] == \"\" assert tracked_request.tags[\"routing_key\"] ==", "state # http://docs.celeryproject.org/en/v4.3.0/userguide/signals.html#setup-logging pass @contextmanager def app_with_scout(app=None, config=None): \"\"\" Context", "manager that configures a Celery app with Scout installed. \"\"\"", "= app.tasks[\"tests.integration.test_celery.hello\"].apply() assert result.result == \"Hello World!\" assert tracked_requests ==", "import contextmanager import celery import pytest from celery.signals import setup_logging", "celery import pytest from celery.signals import setup_logging import scout_apm.celery from", "# Disable running the agent. 
config[\"core_agent_launch\"] = False @app.task def", "hello(): return \"Hello World!\" # Setup according to https://docs.scoutapm.com/#celery Config.set(**config)", "tracked_requests[0].tags first_task_id = tracked_requests[0].tags[\"task_id\"] assert tracked_requests[1].tags[\"parent_task_id\"] == first_task_id def test_no_monitor(tracked_requests):", "Celery 4.0\" ) @setup_logging.connect def do_nothing(**kwargs): # Just by connecting", "test_hello_eager(tracked_requests): with app_with_scout() as app: result = app.tasks[\"tests.integration.test_celery.hello\"].apply() assert result.result", "hello = app.tasks[\"tests.integration.test_celery.hello\"] result = (hello.si() | hello.si()).apply_async().get() assert result", "== 1 tracked_request = tracked_requests[0] assert tracked_request.active_spans == [] assert", "== \"unknown\" assert tracked_request.tags[\"routing_key\"] == \"unknown\" assert tracked_request.tags[\"queue\"] == \"unknown\"", "test took <60 seconds assert tracked_request.active_spans == [] assert len(tracked_request.complete_spans)", "scout_apm.api import Config # http://docs.celeryproject.org/en/latest/userguide/testing.html#py-test skip_unless_celery_4_plus = pytest.mark.skipif( celery.VERSION <", "tracked_request = tracked_requests[0] assert \"task_id\" in tracked_request.tags assert tracked_request.tags[\"is_eager\"] is", "test_no_monitor(tracked_requests): # With an empty config, \"monitor\" defaults to False.", "test_hello_worker_header_preset(celery_app, celery_worker, tracked_requests): with app_with_scout(app=celery_app) as app: result = (", "reason=\"pytest fixtures added in Celery 4.0\" ) @setup_logging.connect def do_nothing(**kwargs):", "Scout configuration. Config.reset_all() def test_hello_eager(tracked_requests): with app_with_scout() as app: result", "with app_with_scout() as app: result = app.tasks[\"tests.integration.test_celery.hello\"].apply() assert result.result ==", "== 1 span = tracked_request.complete_spans[0] assert span.operation == \"Job/tests.integration.test_celery.hello\" @skip_unless_celery_4_plus", "== \"unknown\" assert tracked_request.tags[\"queue\"] == \"unknown\" assert tracked_request.active_spans == []", "app.tasks[\"tests.integration.test_celery.hello\"] result = (hello.si() | hello.si()).apply_async().get() assert result == \"Hello", "app_with_scout(app=None, config=None): \"\"\" Context manager that configures a Celery app", "== \"unknown\" assert ( 0.0 <= tracked_request.tags[\"queue_time\"] < 60.0 )", "tracked_request = tracked_requests[0] assert tracked_request.active_spans == [] assert len(tracked_request.complete_spans) ==", "celery_worker, tracked_requests): with app_with_scout(app=celery_app) as app: result = ( app.tasks[\"tests.integration.test_celery.hello\"]", "assert result.result == \"Hello World!\" assert len(tracked_requests) == 1 tracked_request", "== \"Hello World!\" assert len(tracked_requests) == 2 assert [t.complete_spans[0].operation for", "def test_hello_eager(tracked_requests): with app_with_scout() as app: result = app.tasks[\"tests.integration.test_celery.hello\"].apply() assert", "took <60 seconds assert tracked_request.active_spans == [] assert len(tracked_request.complete_spans) ==", "global state # http://docs.celeryproject.org/en/v4.3.0/userguide/signals.html#setup-logging pass @contextmanager def app_with_scout(app=None, config=None): \"\"\"", "= ( app.tasks[\"tests.integration.test_celery.hello\"] .apply_async(headers={\"scout_task_start\": \"an evil string\"}) .get() ) assert", "1 span = 
tracked_request.complete_spans[0] assert span.operation == \"Job/tests.integration.test_celery.hello\" assert \"queue_time\"", "yield app finally: scout_apm.celery.uninstall() # Reset Scout configuration. Config.reset_all() def", "@skip_unless_celery_4_plus def test_hello_worker(celery_app, celery_worker, tracked_requests): with app_with_scout(app=celery_app) as app: result", "configures a Celery app with Scout installed. \"\"\" if app", "# coding=utf-8 from __future__ import absolute_import, division, print_function, unicode_literals from", "0.0 <= tracked_request.tags[\"queue_time\"] < 60.0 ) # Assume test took", "Just by connecting to this signal, we prevent Celery from", "test_hello_worker(celery_app, celery_worker, tracked_requests): with app_with_scout(app=celery_app) as app: result = app.tasks[\"tests.integration.test_celery.hello\"].delay().get()", "= tracked_request.complete_spans[0] assert span.operation == \"Job/tests.integration.test_celery.hello\" @skip_unless_celery_4_plus def test_hello_worker(celery_app, celery_worker,", "by default in tests. if config is None: config =", "0), reason=\"pytest fixtures added in Celery 4.0\" ) @setup_logging.connect def", "celery.signals import setup_logging import scout_apm.celery from scout_apm.api import Config #", "Celery app with Scout installed. \"\"\" if app is None:", "app: result = ( app.tasks[\"tests.integration.test_celery.hello\"] .apply_async(headers={\"scout_task_start\": \"an evil string\"}) .get()", "span.operation == \"Job/tests.integration.test_celery.hello\" @skip_unless_celery_4_plus def test_hello_worker(celery_app, celery_worker, tracked_requests): with app_with_scout(app=celery_app)", "celery.VERSION < (4, 0), reason=\"pytest fixtures added in Celery 4.0\"", "# http://docs.celeryproject.org/en/latest/userguide/testing.html#py-test skip_unless_celery_4_plus = pytest.mark.skipif( celery.VERSION < (4, 0), reason=\"pytest", "is None: config = {\"monitor\": True} # Disable running the", "in tracked_requests] == [ \"Job/tests.integration.test_celery.hello\", \"Job/tests.integration.test_celery.hello\", ] assert \"parent_task_id\" not", "broker=\"memory://\") # Enable Scout by default in tests. if config", "tests. if config is None: config = {\"monitor\": True} #", "for t in tracked_requests] == [ \"Job/tests.integration.test_celery.hello\", \"Job/tests.integration.test_celery.hello\", ] assert", "as app: result = app.tasks[\"tests.integration.test_celery.hello\"].delay().get() assert result == \"Hello World!\"", "\"Job/tests.integration.test_celery.hello\" assert \"queue_time\" not in span.tags @skip_unless_celery_4_plus def test_hello_worker_chain(celery_app, celery_worker,", "app: result = app.tasks[\"tests.integration.test_celery.hello\"].apply() assert result.result == \"Hello World!\" assert" ]
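The tests above lean on three pytest fixtures: celery_app and celery_worker come from Celery's bundled pytest plugin (per the testing docs linked at the top of the file), while tracked_requests is project-specific and supplied elsewhere in the suite's conftest. A minimal sketch of how the Celery fixtures would be enabled, assuming Celery 4.0+:

# conftest.py - minimal sketch, assuming Celery 4.0+.
# Enables the celery_app / celery_worker fixtures used by the tests above.
# The tracked_requests fixture is not shown here: it is the suite's own
# hook for capturing scout_apm's finished requests, and its implementation
# is not part of this file.
pytest_plugins = ["celery.contrib.pytest"]

Note the design choice in app_with_scout: broker="memory://" uses kombu's in-memory transport, so the whole round-trip stays in-process and no external broker is needed for the eager tests.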
[ "\"orm['places.EntityType']\"}), 'geometry': ('django.contrib.gis.db.models.fields.GeometryField', [], {'null': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key':", "'EntityType' db.create_table('places_entitytype_subtype_of', ( ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), ('from_entitytype', models.ForeignKey(orm['places.entitytype'], null=False)),", "'location': ('django.contrib.gis.db.models.fields.PointField', [], {'null': 'True'}), 'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': \"orm['places.Entity']\",", "db.create_unique('places_entitytype_subtype_of_completion', ['from_entitytype_id', 'to_entitytype_id']) # Adding model 'Identifier' db.create_table('places_identifier', ( ('id',", "'Entity' db.delete_table('places_entity__identifiers') models = { 'places.entity': { 'Meta': {'ordering': \"('title',)\",", "'object_name': 'EntityType'}, 'article': ('django.db.models.fields.CharField', [], {'max_length': '2'}), 'id': ('django.db.models.fields.AutoField', [],", "('django.db.models.fields.TextField', [], {}), 'verbose_name_plural': ('django.db.models.fields.TextField', [], {}) }, 'places.identifier': {", "null=False)) )) db.create_unique('places_entitytype_subtype_of', ['from_entitytype_id', 'to_entitytype_id']) # Adding M2M table for", "('django.contrib.gis.db.models.fields.GeometryField', [], {'null': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'identifier_scheme':", "('parent', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['places.Entity'], null=True)), ('is_sublocation', self.gf('django.db.models.fields.BooleanField')(default=False)), ('is_stack', self.gf('django.db.models.fields.BooleanField')(default=False)), ('identifier_scheme', self.gf('django.db.models.fields.CharField')(max_length=32)), ('identifier_value',", "'False'}), 'show_in_nearby_list': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'slug': ('django.db.models.fields.SlugField', [], {'max_length':", "('entitytype', models.ForeignKey(orm['places.entitytype'], null=False)) )) db.create_unique('places_entity_all_types_completion', ['entity_id', 'entitytype_id']) # Adding M2M", "[], {'primary_key': 'True'}), 'note': ('django.db.models.fields.TextField', [], {'null': 'True'}), 'show_in_category_list': ('django.db.models.fields.BooleanField',", "self.gf('django.db.models.fields.CharField')(max_length=32)), ('identifier_value', self.gf('django.db.models.fields.CharField')(max_length=256)), )) db.send_create_signal('places', ['Entity']) # Adding M2M table", "[], {'primary_key': 'True'}), 'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),", "'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': \"orm['places.Entity']\", 'null': 'True'}), 'primary_type': ('django.db.models.fields.related.ForeignKey', [],", "('primary_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['places.EntityType'], null=True)), ('location', self.gf('django.contrib.gis.db.models.fields.PointField')(null=True)), ('geometry', self.gf('django.contrib.gis.db.models.fields.GeometryField')(null=True)), ('_metadata', self.gf('django.db.models.fields.TextField')(default='{}')), ('absolute_url',", ")) db.create_unique('places_entity__identifiers', ['entity_id', 'identifier_id']) def backwards(self, orm): # Deleting model", "[], {'default': \"'{}'\"}), 'absolute_url': ('django.db.models.fields.TextField', [], {}), 'all_types': 
('django.db.models.fields.related.ManyToManyField', [],", "'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'identifier_scheme': ('django.db.models.fields.CharField', [], {'max_length': '32'}),", "[], {'default': 'False'}), 'is_sublocation': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'location': ('django.contrib.gis.db.models.fields.PointField',", "('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank':", "'article': ('django.db.models.fields.CharField', [], {'max_length': '2'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),", "self.gf('django.db.models.fields.BooleanField')(default=False)), ('is_stack', self.gf('django.db.models.fields.BooleanField')(default=False)), ('identifier_scheme', self.gf('django.db.models.fields.CharField')(max_length=32)), ('identifier_value', self.gf('django.db.models.fields.CharField')(max_length=256)), )) db.send_create_signal('places', ['Entity'])", "utf-8 import datetime from south.db import db from south.v2 import", "['Identifier']) # Adding model 'Entity' db.create_table('places_entity', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('title',", "('absolute_url', self.gf('django.db.models.fields.TextField')()), ('parent', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['places.Entity'], null=True)), ('is_sublocation', self.gf('django.db.models.fields.BooleanField')(default=False)), ('is_stack', self.gf('django.db.models.fields.BooleanField')(default=False)), ('identifier_scheme',", "self.gf('django.db.models.fields.TextField')()), ('show_in_nearby_list', self.gf('django.db.models.fields.BooleanField')(default=False)), ('show_in_category_list', self.gf('django.db.models.fields.BooleanField')(default=False)), ('note', self.gf('django.db.models.fields.TextField')(null=True)), )) db.send_create_signal('places', ['EntityType'])", "('value', self.gf('django.db.models.fields.CharField')(max_length=256)), )) db.send_create_signal('places', ['Identifier']) # Adding model 'Entity' db.create_table('places_entity',", "models.ForeignKey(orm['places.entitytype'], null=False)), ('to_entitytype', models.ForeignKey(orm['places.entitytype'], null=False)) )) db.create_unique('places_entitytype_subtype_of_completion', ['from_entitytype_id', 'to_entitytype_id']) #", "db.delete_table('places_entitytype_subtype_of_completion') # Deleting model 'Identifier' db.delete_table('places_identifier') # Deleting model 'Entity'", "'Entity' db.create_table('places_entity_all_types_completion', ( ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), ('entity', models.ForeignKey(orm['places.entity'], null=False)),", "primary_key=True, auto_created=True)), ('entity', models.ForeignKey(orm['places.entity'], null=False)), ('entitytype', models.ForeignKey(orm['places.entitytype'], null=False)) )) db.create_unique('places_entity_all_types',", "'Entity' db.create_table('places_entity_all_types', ( ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), ('entity', models.ForeignKey(orm['places.entity'], null=False)),", "field all_types_completion on 'Entity' db.create_table('places_entity_all_types_completion', ( ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),", "[], {'null': 'True'}), 'show_in_category_list': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 
'show_in_nearby_list': ('django.db.models.fields.BooleanField',", "table for field subtype_of_completion on 'EntityType' db.delete_table('places_entitytype_subtype_of_completion') # Deleting model", "db.send_create_signal('places', ['Source']) # Adding model 'EntityType' db.create_table('places_entitytype', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),", "self.gf('django.db.models.fields.CharField')(max_length=2)), ('verbose_name', self.gf('django.db.models.fields.TextField')()), ('verbose_name_plural', self.gf('django.db.models.fields.TextField')()), ('show_in_nearby_list', self.gf('django.db.models.fields.BooleanField')(default=False)), ('show_in_category_list', self.gf('django.db.models.fields.BooleanField')(default=False)), ('note',", "M2M table for field subtype_of on 'EntityType' db.create_table('places_entitytype_subtype_of', ( ('id',", "'scheme': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'value': ('django.db.models.fields.CharField', [], {'max_length': '256'})", "field subtype_of_completion on 'EntityType' db.delete_table('places_entitytype_subtype_of_completion') # Deleting model 'Identifier' db.delete_table('places_identifier')", "('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'note': ('django.db.models.fields.TextField', [], {'null': 'True'}), 'show_in_category_list':", "models.ForeignKey(orm['places.entitytype'], null=False)) )) db.create_unique('places_entitytype_subtype_of_completion', ['from_entitytype_id', 'to_entitytype_id']) # Adding model 'Identifier'", "# Adding M2M table for field all_types on 'Entity' db.create_table('places_entity_all_types',", "backwards(self, orm): # Deleting model 'Source' db.delete_table('places_source') # Deleting model", "# Deleting model 'EntityType' db.delete_table('places_entitytype') # Removing M2M table for", "Adding M2M table for field _identifiers on 'Entity' db.create_table('places_entity__identifiers', (", "[], {}), 'all_types': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': \"'entities'\", 'blank':", "'blank': 'True', 'to': \"orm['places.EntityType']\"}), 'all_types_completion': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name':", "for field all_types on 'Entity' db.delete_table('places_entity_all_types') # Removing M2M table", "orm): # Deleting model 'Source' db.delete_table('places_source') # Deleting model 'EntityType'", "M2M table for field all_types_completion on 'Entity' db.delete_table('places_entity_all_types_completion') # Removing", "primary_key=True, auto_created=True)), ('entity', models.ForeignKey(orm['places.entity'], null=False)), ('identifier', models.ForeignKey(orm['places.identifier'], null=False)) )) db.create_unique('places_entity__identifiers',", "'places.entitytype': { 'Meta': {'ordering': \"('verbose_name',)\", 'object_name': 'EntityType'}, 'article': ('django.db.models.fields.CharField', [],", "# Removing M2M table for field subtype_of on 'EntityType' db.delete_table('places_entitytype_subtype_of')", "'True', 'to': \"orm['places.EntityType']\"}), 'subtype_of_completion': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': \"'subtypes_completion'\",", "M2M table for field all_types on 'Entity' db.create_table('places_entity_all_types', ( ('id',", "'related_name': \"'subtypes_completion'\", 'blank': 'True', 'to': \"orm['places.EntityType']\"}), 'verbose_name': ('django.db.models.fields.TextField', [], 
{}),", "db.send_create_signal('places', ['Entity']) # Adding M2M table for field all_types on", "field all_types on 'Entity' db.delete_table('places_entity_all_types') # Removing M2M table for", "Removing M2M table for field _identifiers on 'Entity' db.delete_table('places_entity__identifiers') models", "'False', 'related_name': \"'entities'\", 'blank': 'True', 'to': \"orm['places.EntityType']\"}), 'all_types_completion': ('django.db.models.fields.related.ManyToManyField', [],", "{'max_length': '256'}), 'is_stack': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_sublocation': ('django.db.models.fields.BooleanField', [],", "db.delete_table('places_entity_all_types') # Removing M2M table for field all_types_completion on 'Entity'", "blank=True)), )) db.send_create_signal('places', ['Source']) # Adding model 'EntityType' db.create_table('places_entitytype', (", "def backwards(self, orm): # Deleting model 'Source' db.delete_table('places_source') # Deleting", "{ 'Meta': {'ordering': \"('verbose_name',)\", 'object_name': 'EntityType'}, 'article': ('django.db.models.fields.CharField', [], {'max_length':", "field all_types on 'Entity' db.create_table('places_entity_all_types', ( ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),", "import db from south.v2 import SchemaMigration from django.db import models", "db.send_create_signal('places', ['EntityType']) # Adding M2M table for field subtype_of on", "'db_index': 'True'}), 'subtype_of': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': \"'subtypes'\", 'blank':", "primary_key=True, auto_created=True)), ('from_entitytype', models.ForeignKey(orm['places.entitytype'], null=False)), ('to_entitytype', models.ForeignKey(orm['places.entitytype'], null=False)) )) db.create_unique('places_entitytype_subtype_of_completion',", "('_metadata', self.gf('django.db.models.fields.TextField')(default='{}')), ('absolute_url', self.gf('django.db.models.fields.TextField')()), ('parent', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['places.Entity'], null=True)), ('is_sublocation', self.gf('django.db.models.fields.BooleanField')(default=False)), ('is_stack',", "'to': \"orm['places.EntityType']\"}), 'geometry': ('django.contrib.gis.db.models.fields.GeometryField', [], {'null': 'True'}), 'id': ('django.db.models.fields.AutoField', [],", "field subtype_of on 'EntityType' db.create_table('places_entitytype_subtype_of', ( ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),", "models class Migration(SchemaMigration): def forwards(self, orm): # Adding model 'Source'", "django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding", "for field subtype_of on 'EntityType' db.create_table('places_entitytype_subtype_of', ( ('id', models.AutoField(verbose_name='ID', primary_key=True,", "Removing M2M table for field all_types on 'Entity' db.delete_table('places_entity_all_types') #", "'module_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128'})", "auto_created=True)), ('entity', models.ForeignKey(orm['places.entity'], null=False)), ('identifier', models.ForeignKey(orm['places.identifier'], null=False)) )) db.create_unique('places_entity__identifiers', ['entity_id',", "('django.db.models.fields.TextField', [], {'null': 'True'}), 'show_in_category_list': ('django.db.models.fields.BooleanField', [], {'default': 
'False'}), 'show_in_nearby_list':", "\"('verbose_name',)\", 'object_name': 'EntityType'}, 'article': ('django.db.models.fields.CharField', [], {'max_length': '2'}), 'id': ('django.db.models.fields.AutoField',", "'Identifier' db.delete_table('places_identifier') # Deleting model 'Entity' db.delete_table('places_entity') # Removing M2M", "( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('title', self.gf('django.db.models.fields.TextField')(blank=True)), ('source', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['places.Source'])), ('primary_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['places.EntityType'], null=True)),", "table for field _identifiers on 'Entity' db.delete_table('places_entity__identifiers') models = {", "{'max_length': '32'}), 'identifier_value': ('django.db.models.fields.CharField', [], {'max_length': '256'}), 'is_stack': ('django.db.models.fields.BooleanField', [],", "('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('slug', self.gf('django.db.models.fields.SlugField')(max_length=50, db_index=True)), ('article', self.gf('django.db.models.fields.CharField')(max_length=2)), ('verbose_name', self.gf('django.db.models.fields.TextField')()), ('verbose_name_plural',", "'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'scheme': ('django.db.models.fields.CharField', [], {'max_length': '32'}),", "db.create_unique('places_entity_all_types', ['entity_id', 'entitytype_id']) # Adding M2M table for field all_types_completion", "Adding M2M table for field subtype_of_completion on 'EntityType' db.create_table('places_entitytype_subtype_of_completion', (", "# Deleting model 'Source' db.delete_table('places_source') # Deleting model 'EntityType' db.delete_table('places_entitytype')", "'is_stack': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_sublocation': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),", "import models class Migration(SchemaMigration): def forwards(self, orm): # Adding model", "}, 'places.source': { 'Meta': {'object_name': 'Source'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key':", "db.create_table('places_entity_all_types_completion', ( ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), ('entity', models.ForeignKey(orm['places.entity'], null=False)), ('entitytype',", "subtype_of on 'EntityType' db.delete_table('places_entitytype_subtype_of') # Removing M2M table for field", "null=False)), ('entitytype', models.ForeignKey(orm['places.entitytype'], null=False)) )) db.create_unique('places_entity_all_types', ['entity_id', 'entitytype_id']) # Adding", "'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'}), 'subtype_of': ('django.db.models.fields.related.ManyToManyField', [],", "self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)), )) db.send_create_signal('places', ['Source']) # Adding model 'EntityType' db.create_table('places_entitytype',", "for field _identifiers on 'Entity' db.create_table('places_entity__identifiers', ( ('id', models.AutoField(verbose_name='ID', primary_key=True,", "'blank': 'True', 'to': \"orm['places.EntityType']\"}), 'subtype_of_completion': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name':", "db.create_table('places_entitytype_subtype_of_completion', ( ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), 
('from_entitytype', models.ForeignKey(orm['places.entitytype'], null=False)), ('to_entitytype',", ")) db.create_unique('places_entity_all_types', ['entity_id', 'entitytype_id']) # Adding M2M table for field", ")) db.send_create_signal('places', ['Source']) # Adding model 'EntityType' db.create_table('places_entitytype', ( ('id',", "Adding M2M table for field all_types_completion on 'Entity' db.create_table('places_entity_all_types_completion', (", "null=True)), ('location', self.gf('django.contrib.gis.db.models.fields.PointField')(null=True)), ('geometry', self.gf('django.contrib.gis.db.models.fields.GeometryField')(null=True)), ('_metadata', self.gf('django.db.models.fields.TextField')(default='{}')), ('absolute_url', self.gf('django.db.models.fields.TextField')()), ('parent',", "'False', 'related_name': \"'subtypes_completion'\", 'blank': 'True', 'to': \"orm['places.EntityType']\"}), 'verbose_name': ('django.db.models.fields.TextField', [],", "'EntityType' db.create_table('places_entitytype_subtype_of_completion', ( ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), ('from_entitytype', models.ForeignKey(orm['places.entitytype'], null=False)),", "('scheme', self.gf('django.db.models.fields.CharField')(max_length=32)), ('value', self.gf('django.db.models.fields.CharField')(max_length=256)), )) db.send_create_signal('places', ['Identifier']) # Adding model", "}, 'places.entitytype': { 'Meta': {'ordering': \"('verbose_name',)\", 'object_name': 'EntityType'}, 'article': ('django.db.models.fields.CharField',", "['from_entitytype_id', 'to_entitytype_id']) # Adding model 'Identifier' db.create_table('places_identifier', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),", "\"'entities_completion'\", 'blank': 'True', 'to': \"orm['places.EntityType']\"}), 'geometry': ('django.contrib.gis.db.models.fields.GeometryField', [], {'null': 'True'}),", "'is_sublocation': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'location': ('django.contrib.gis.db.models.fields.PointField', [], {'null': 'True'}),", "'False', 'related_name': \"'entities_completion'\", 'blank': 'True', 'to': \"orm['places.EntityType']\"}), 'geometry': ('django.contrib.gis.db.models.fields.GeometryField', [],", "('django.db.models.fields.TextField', [], {}) }, 'places.identifier': { 'Meta': {'object_name': 'Identifier'}, 'id':", "# encoding: utf-8 import datetime from south.db import db from", "all_types on 'Entity' db.create_table('places_entity_all_types', ( ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), ('entity',", "def forwards(self, orm): # Adding model 'Source' db.create_table('places_source', ( ('id',", "field _identifiers on 'Entity' db.create_table('places_entity__identifiers', ( ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),", "'Meta': {'object_name': 'Identifier'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'scheme': ('django.db.models.fields.CharField',", "{'default': 'False'}), 'show_in_nearby_list': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'slug': ('django.db.models.fields.SlugField', [],", "M2M table for field _identifiers on 'Entity' db.delete_table('places_entity__identifiers') models =", "'False'}), 'is_sublocation': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'location': ('django.contrib.gis.db.models.fields.PointField', [], {'null':", "'256'}), 'is_stack': 
('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_sublocation': ('django.db.models.fields.BooleanField', [], {'default':", "# Adding model 'Entity' db.create_table('places_entity', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('title', self.gf('django.db.models.fields.TextField')(blank=True)),", "_identifiers on 'Entity' db.create_table('places_entity__identifiers', ( ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), ('entity',", "Adding model 'Entity' db.create_table('places_entity', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('title', self.gf('django.db.models.fields.TextField')(blank=True)), ('source',", "db.create_table('places_entity__identifiers', ( ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), ('entity', models.ForeignKey(orm['places.entity'], null=False)), ('identifier',", "on 'Entity' db.delete_table('places_entity_all_types_completion') # Removing M2M table for field _identifiers", "db.create_table('places_entity', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('title', self.gf('django.db.models.fields.TextField')(blank=True)), ('source', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['places.Source'])), ('primary_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['places.EntityType'],", "orm): # Adding model 'Source' db.create_table('places_source', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('module_name',", "self.gf('django.db.models.fields.TextField')(blank=True)), ('source', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['places.Source'])), ('primary_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['places.EntityType'], null=True)), ('location', self.gf('django.contrib.gis.db.models.fields.PointField')(null=True)), ('geometry', self.gf('django.contrib.gis.db.models.fields.GeometryField')(null=True)),", "model 'Identifier' db.delete_table('places_identifier') # Deleting model 'Entity' db.delete_table('places_entity') # Removing", "'Source'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now':", "{'default': 'False'}), 'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'}), 'subtype_of':", "('verbose_name', self.gf('django.db.models.fields.TextField')()), ('verbose_name_plural', self.gf('django.db.models.fields.TextField')()), ('show_in_nearby_list', self.gf('django.db.models.fields.BooleanField')(default=False)), ('show_in_category_list', self.gf('django.db.models.fields.BooleanField')(default=False)), ('note', self.gf('django.db.models.fields.TextField')(null=True)),", "('entity', models.ForeignKey(orm['places.entity'], null=False)), ('entitytype', models.ForeignKey(orm['places.entitytype'], null=False)) )) db.create_unique('places_entity_all_types', ['entity_id', 'entitytype_id'])", "# Adding model 'Source' db.create_table('places_source', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('module_name', self.gf('django.db.models.fields.CharField')(max_length=128)),", "auto_created=True)), ('entity', models.ForeignKey(orm['places.entity'], null=False)), ('entitytype', models.ForeignKey(orm['places.entitytype'], null=False)) )) db.create_unique('places_entity_all_types', ['entity_id',", "'True'}), 'module_name': ('django.db.models.fields.CharField', 
[], {'max_length': '128'}), 'name': ('django.db.models.fields.CharField', [], {'max_length':", "['from_entitytype_id', 'to_entitytype_id']) # Adding M2M table for field subtype_of_completion on", "'related_name': \"'entities'\", 'blank': 'True', 'to': \"orm['places.EntityType']\"}), 'all_types_completion': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical':", "('entitytype', models.ForeignKey(orm['places.entitytype'], null=False)) )) db.create_unique('places_entity_all_types', ['entity_id', 'entitytype_id']) # Adding M2M", "models.ForeignKey(orm['places.entitytype'], null=False)) )) db.create_unique('places_entitytype_subtype_of', ['from_entitytype_id', 'to_entitytype_id']) # Adding M2M table", "{'symmetrical': 'False', 'related_name': \"'subtypes_completion'\", 'blank': 'True', 'to': \"orm['places.EntityType']\"}), 'verbose_name': ('django.db.models.fields.TextField',", "('slug', self.gf('django.db.models.fields.SlugField')(max_length=50, db_index=True)), ('article', self.gf('django.db.models.fields.CharField')(max_length=2)), ('verbose_name', self.gf('django.db.models.fields.TextField')()), ('verbose_name_plural', self.gf('django.db.models.fields.TextField')()), ('show_in_nearby_list',", "'False'}), 'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'}), 'subtype_of': ('django.db.models.fields.related.ManyToManyField',", "('identifier', models.ForeignKey(orm['places.identifier'], null=False)) )) db.create_unique('places_entity__identifiers', ['entity_id', 'identifier_id']) def backwards(self, orm):", "'places.identifier': { 'Meta': {'object_name': 'Identifier'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),", "'Entity' db.delete_table('places_entity_all_types_completion') # Removing M2M table for field _identifiers on", "('to_entitytype', models.ForeignKey(orm['places.entitytype'], null=False)) )) db.create_unique('places_entitytype_subtype_of', ['from_entitytype_id', 'to_entitytype_id']) # Adding M2M", "\"orm['places.EntityType']\"}), 'subtype_of_completion': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': \"'subtypes_completion'\", 'blank': 'True',", "null=False)), ('to_entitytype', models.ForeignKey(orm['places.entitytype'], null=False)) )) db.create_unique('places_entitytype_subtype_of', ['from_entitytype_id', 'to_entitytype_id']) # Adding", "datetime from south.db import db from south.v2 import SchemaMigration from", "db.delete_table('places_entity_all_types_completion') # Removing M2M table for field _identifiers on 'Entity'", "db.delete_table('places_entity') # Removing M2M table for field all_types on 'Entity'", "null=False)) )) db.create_unique('places_entity_all_types', ['entity_id', 'entitytype_id']) # Adding M2M table for", "{'to': \"orm['places.Entity']\", 'null': 'True'}), 'primary_type': ('django.db.models.fields.related.ForeignKey', [], {'to': \"orm['places.EntityType']\", 'null':", "self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('module_name', self.gf('django.db.models.fields.CharField')(max_length=128)), ('name', self.gf('django.db.models.fields.CharField')(max_length=128)), ('last_updated', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)), )) db.send_create_signal('places',", "'title': ('django.db.models.fields.TextField', [], {'blank': 'True'}) }, 'places.entitytype': { 'Meta': {'ordering':", "db.delete_table('places_source') # Deleting model 'EntityType' 
db.delete_table('places_entitytype') # Removing M2M table", "db.delete_table('places_entitytype_subtype_of') # Removing M2M table for field subtype_of_completion on 'EntityType'", "'absolute_url': ('django.db.models.fields.TextField', [], {}), 'all_types': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name':", "'null': 'True'}), 'primary_type': ('django.db.models.fields.related.ForeignKey', [], {'to': \"orm['places.EntityType']\", 'null': 'True'}), 'source':", "# Adding M2M table for field subtype_of_completion on 'EntityType' db.create_table('places_entitytype_subtype_of_completion',", "{'null': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'identifier_scheme': ('django.db.models.fields.CharField', [],", "from django.db import models class Migration(SchemaMigration): def forwards(self, orm): #", "field all_types_completion on 'Entity' db.delete_table('places_entity_all_types_completion') # Removing M2M table for", "'Meta': {'ordering': \"('title',)\", 'object_name': 'Entity'}, '_identifiers': ('django.db.models.fields.related.ManyToManyField', [], {'to': \"orm['places.Identifier']\",", "'subtype_of_completion': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': \"'subtypes_completion'\", 'blank': 'True', 'to':", "[], {'auto_now': 'True', 'blank': 'True'}), 'module_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),", "db.create_table('places_source', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('module_name', self.gf('django.db.models.fields.CharField')(max_length=128)), ('name', self.gf('django.db.models.fields.CharField')(max_length=128)), ('last_updated', self.gf('django.db.models.fields.DateTimeField')(auto_now=True,", ")) db.create_unique('places_entitytype_subtype_of', ['from_entitytype_id', 'to_entitytype_id']) # Adding M2M table for field", "self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('title', self.gf('django.db.models.fields.TextField')(blank=True)), ('source', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['places.Source'])), ('primary_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['places.EntityType'], null=True)), ('location', self.gf('django.contrib.gis.db.models.fields.PointField')(null=True)),", "db.create_table('places_entitytype', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('slug', self.gf('django.db.models.fields.SlugField')(max_length=50, db_index=True)), ('article', self.gf('django.db.models.fields.CharField')(max_length=2)), ('verbose_name',", "# Deleting model 'Entity' db.delete_table('places_entity') # Removing M2M table for", "('article', self.gf('django.db.models.fields.CharField')(max_length=2)), ('verbose_name', self.gf('django.db.models.fields.TextField')()), ('verbose_name_plural', self.gf('django.db.models.fields.TextField')()), ('show_in_nearby_list', self.gf('django.db.models.fields.BooleanField')(default=False)), ('show_in_category_list', self.gf('django.db.models.fields.BooleanField')(default=False)),", "all_types_completion on 'Entity' db.create_table('places_entity_all_types_completion', ( ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), ('entity',", "'to': \"orm['places.EntityType']\"}), 'subtype_of_completion': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': \"'subtypes_completion'\", 'blank':", "'EntityType' 
db.delete_table('places_entitytype_subtype_of_completion') # Deleting model 'Identifier' db.delete_table('places_identifier') # Deleting model", "{'default': \"'{}'\"}), 'absolute_url': ('django.db.models.fields.TextField', [], {}), 'all_types': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical':", "('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'identifier_scheme': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'identifier_value':", "on 'EntityType' db.delete_table('places_entitytype_subtype_of') # Removing M2M table for field subtype_of_completion", "'Entity' db.delete_table('places_entity_all_types') # Removing M2M table for field all_types_completion on", "[], {'max_length': '256'}), 'is_stack': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_sublocation': ('django.db.models.fields.BooleanField',", "}, 'places.identifier': { 'Meta': {'object_name': 'Identifier'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key':", "self.gf('django.db.models.fields.TextField')()), ('parent', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['places.Entity'], null=True)), ('is_sublocation', self.gf('django.db.models.fields.BooleanField')(default=False)), ('is_stack', self.gf('django.db.models.fields.BooleanField')(default=False)), ('identifier_scheme', self.gf('django.db.models.fields.CharField')(max_length=32)),", "('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'location': ('django.contrib.gis.db.models.fields.PointField', [], {'null': 'True'}), 'parent':", "'True'}), 'scheme': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'value': ('django.db.models.fields.CharField', [], {'max_length':", "('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': \"'entities_completion'\", 'blank': 'True', 'to': \"orm['places.EntityType']\"}),", "{'max_length': '2'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'note': ('django.db.models.fields.TextField', [],", "# Adding M2M table for field _identifiers on 'Entity' db.create_table('places_entity__identifiers',", "encoding: utf-8 import datetime from south.db import db from south.v2", "for field all_types_completion on 'Entity' db.delete_table('places_entity_all_types_completion') # Removing M2M table", "('django.db.models.fields.CharField', [], {'max_length': '128'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}) }", "('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), ('entity', models.ForeignKey(orm['places.entity'], null=False)), ('identifier', models.ForeignKey(orm['places.identifier'], null=False))", "\"'entities'\", 'blank': 'True', 'to': \"orm['places.EntityType']\"}), 'all_types_completion': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False',", "[], {'to': \"orm['places.Entity']\", 'null': 'True'}), 'primary_type': ('django.db.models.fields.related.ForeignKey', [], {'to': \"orm['places.EntityType']\",", "'_identifiers': ('django.db.models.fields.related.ManyToManyField', [], {'to': \"orm['places.Identifier']\", 'symmetrical': 'False'}), '_metadata': ('django.db.models.fields.TextField', [],", "'True'}), 'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'module_name': ('django.db.models.fields.CharField',", "('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': 
\"'subtypes'\", 'blank': 'True', 'to': \"orm['places.EntityType']\"}),", "Adding model 'Identifier' db.create_table('places_identifier', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('scheme', self.gf('django.db.models.fields.CharField')(max_length=32)), ('value',", "db.delete_table('places_entity__identifiers') models = { 'places.entity': { 'Meta': {'ordering': \"('title',)\", 'object_name':", "'identifier_id']) def backwards(self, orm): # Deleting model 'Source' db.delete_table('places_source') #", "('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'}), 'subtype_of': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical':", "'True'}), 'show_in_category_list': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'show_in_nearby_list': ('django.db.models.fields.BooleanField', [], {'default':", "'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'module_name': ('django.db.models.fields.CharField', [],", "('location', self.gf('django.contrib.gis.db.models.fields.PointField')(null=True)), ('geometry', self.gf('django.contrib.gis.db.models.fields.GeometryField')(null=True)), ('_metadata', self.gf('django.db.models.fields.TextField')(default='{}')), ('absolute_url', self.gf('django.db.models.fields.TextField')()), ('parent', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['places.Entity'],", "# Removing M2M table for field subtype_of_completion on 'EntityType' db.delete_table('places_entitytype_subtype_of_completion')", "('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'module_name': ('django.db.models.fields.CharField', [], {'max_length':", "'verbose_name': ('django.db.models.fields.TextField', [], {}), 'verbose_name_plural': ('django.db.models.fields.TextField', [], {}) }, 'places.identifier':", "model 'Source' db.delete_table('places_source') # Deleting model 'EntityType' db.delete_table('places_entitytype') # Removing", "{'max_length': '256'}) }, 'places.source': { 'Meta': {'object_name': 'Source'}, 'id': ('django.db.models.fields.AutoField',", "M2M table for field subtype_of_completion on 'EntityType' db.delete_table('places_entitytype_subtype_of_completion') # Deleting", "self.gf('django.db.models.fields.CharField')(max_length=256)), )) db.send_create_signal('places', ['Entity']) # Adding M2M table for field", "table for field all_types on 'Entity' db.create_table('places_entity_all_types', ( ('id', models.AutoField(verbose_name='ID',", "# Adding M2M table for field subtype_of on 'EntityType' db.create_table('places_entitytype_subtype_of',", "( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('scheme', self.gf('django.db.models.fields.CharField')(max_length=32)), ('value', self.gf('django.db.models.fields.CharField')(max_length=256)), )) db.send_create_signal('places', ['Identifier'])", "[], {'max_length': '50', 'db_index': 'True'}), 'subtype_of': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False',", "for field subtype_of_completion on 'EntityType' db.delete_table('places_entitytype_subtype_of_completion') # Deleting model 'Identifier'", "{}), 'all_types': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': \"'entities'\", 'blank': 'True',", "{'auto_now': 'True', 'blank': 'True'}), 'module_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'name':", "{'primary_key': 'True'}), 
'identifier_scheme': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'identifier_value': ('django.db.models.fields.CharField', [],", "('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), ('entity', models.ForeignKey(orm['places.entity'], null=False)), ('entitytype', models.ForeignKey(orm['places.entitytype'], null=False))", "('entity', models.ForeignKey(orm['places.entity'], null=False)), ('identifier', models.ForeignKey(orm['places.identifier'], null=False)) )) db.create_unique('places_entity__identifiers', ['entity_id', 'identifier_id'])", "model 'Source' db.create_table('places_source', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('module_name', self.gf('django.db.models.fields.CharField')(max_length=128)), ('name', self.gf('django.db.models.fields.CharField')(max_length=128)),", "null=True)), ('is_sublocation', self.gf('django.db.models.fields.BooleanField')(default=False)), ('is_stack', self.gf('django.db.models.fields.BooleanField')(default=False)), ('identifier_scheme', self.gf('django.db.models.fields.CharField')(max_length=32)), ('identifier_value', self.gf('django.db.models.fields.CharField')(max_length=256)), ))", "table for field all_types_completion on 'Entity' db.create_table('places_entity_all_types_completion', ( ('id', models.AutoField(verbose_name='ID',", "# Adding M2M table for field all_types_completion on 'Entity' db.create_table('places_entity_all_types_completion',", "Deleting model 'Source' db.delete_table('places_source') # Deleting model 'EntityType' db.delete_table('places_entitytype') #", "'True'}), 'source': ('django.db.models.fields.related.ForeignKey', [], {'to': \"orm['places.Source']\"}), 'title': ('django.db.models.fields.TextField', [], {'blank':", "primary_key=True, auto_created=True)), ('entity', models.ForeignKey(orm['places.entity'], null=False)), ('entitytype', models.ForeignKey(orm['places.entitytype'], null=False)) )) db.create_unique('places_entity_all_types_completion',", "null=False)), ('identifier', models.ForeignKey(orm['places.identifier'], null=False)) )) db.create_unique('places_entity__identifiers', ['entity_id', 'identifier_id']) def backwards(self,", "self.gf('django.db.models.fields.related.ForeignKey')(to=orm['places.Source'])), ('primary_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['places.EntityType'], null=True)), ('location', self.gf('django.contrib.gis.db.models.fields.PointField')(null=True)), ('geometry', self.gf('django.contrib.gis.db.models.fields.GeometryField')(null=True)), ('_metadata', self.gf('django.db.models.fields.TextField')(default='{}')),", "all_types_completion on 'Entity' db.delete_table('places_entity_all_types_completion') # Removing M2M table for field", "model 'EntityType' db.delete_table('places_entitytype') # Removing M2M table for field subtype_of", "null=False)) )) db.create_unique('places_entity_all_types_completion', ['entity_id', 'entitytype_id']) # Adding M2M table for", "self.gf('django.contrib.gis.db.models.fields.GeometryField')(null=True)), ('_metadata', self.gf('django.db.models.fields.TextField')(default='{}')), ('absolute_url', self.gf('django.db.models.fields.TextField')()), ('parent', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['places.Entity'], null=True)), ('is_sublocation', self.gf('django.db.models.fields.BooleanField')(default=False)),", "M2M table for field subtype_of_completion on 'EntityType' db.create_table('places_entitytype_subtype_of_completion', ( ('id',", 
"'places.source': { 'Meta': {'object_name': 'Source'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),", "'object_name': 'Entity'}, '_identifiers': ('django.db.models.fields.related.ManyToManyField', [], {'to': \"orm['places.Identifier']\", 'symmetrical': 'False'}), '_metadata':", "{'default': 'False'}), 'is_sublocation': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'location': ('django.contrib.gis.db.models.fields.PointField', [],", "('django.db.models.fields.CharField', [], {'max_length': '32'}), 'value': ('django.db.models.fields.CharField', [], {'max_length': '256'}) },", "for field subtype_of on 'EntityType' db.delete_table('places_entitytype_subtype_of') # Removing M2M table", "('django.contrib.gis.db.models.fields.PointField', [], {'null': 'True'}), 'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': \"orm['places.Entity']\", 'null':", "models.ForeignKey(orm['places.entitytype'], null=False)), ('to_entitytype', models.ForeignKey(orm['places.entitytype'], null=False)) )) db.create_unique('places_entitytype_subtype_of', ['from_entitytype_id', 'to_entitytype_id']) #", "('django.db.models.fields.TextField', [], {}), 'all_types': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': \"'entities'\",", "('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': \"'subtypes_completion'\", 'blank': 'True', 'to': \"orm['places.EntityType']\"}),", "Removing M2M table for field subtype_of_completion on 'EntityType' db.delete_table('places_entitytype_subtype_of_completion') #", "Adding model 'Source' db.create_table('places_source', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('module_name', self.gf('django.db.models.fields.CharField')(max_length=128)), ('name',", "db.create_table('places_entity_all_types', ( ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), ('entity', models.ForeignKey(orm['places.entity'], null=False)), ('entitytype',", "\"orm['places.EntityType']\"}), 'all_types_completion': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': \"'entities_completion'\", 'blank': 'True',", "{'primary_key': 'True'}), 'scheme': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'value': ('django.db.models.fields.CharField', [],", "{ 'places.entity': { 'Meta': {'ordering': \"('title',)\", 'object_name': 'Entity'}, '_identifiers': ('django.db.models.fields.related.ManyToManyField',", ")) db.create_unique('places_entity_all_types_completion', ['entity_id', 'entitytype_id']) # Adding M2M table for field", "('identifier_scheme', self.gf('django.db.models.fields.CharField')(max_length=32)), ('identifier_value', self.gf('django.db.models.fields.CharField')(max_length=256)), )) db.send_create_signal('places', ['Entity']) # Adding M2M", "('show_in_category_list', self.gf('django.db.models.fields.BooleanField')(default=False)), ('note', self.gf('django.db.models.fields.TextField')(null=True)), )) db.send_create_signal('places', ['EntityType']) # Adding M2M", "{'max_length': '32'}), 'value': ('django.db.models.fields.CharField', [], {'max_length': '256'}) }, 'places.source': {", "_identifiers on 'Entity' db.delete_table('places_entity__identifiers') models = { 'places.entity': { 'Meta':", "'Meta': {'object_name': 'Source'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'last_updated': 
('django.db.models.fields.DateTimeField',", "('entity', models.ForeignKey(orm['places.entity'], null=False)), ('entitytype', models.ForeignKey(orm['places.entitytype'], null=False)) )) db.create_unique('places_entity_all_types_completion', ['entity_id', 'entitytype_id'])", "field subtype_of on 'EntityType' db.delete_table('places_entitytype_subtype_of') # Removing M2M table for", "subtype_of on 'EntityType' db.create_table('places_entitytype_subtype_of', ( ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), ('from_entitytype',", "('django.db.models.fields.related.ForeignKey', [], {'to': \"orm['places.EntityType']\", 'null': 'True'}), 'source': ('django.db.models.fields.related.ForeignKey', [], {'to':", "'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True',", "'related_name': \"'entities_completion'\", 'blank': 'True', 'to': \"orm['places.EntityType']\"}), 'geometry': ('django.contrib.gis.db.models.fields.GeometryField', [], {'null':", "('django.db.models.fields.CharField', [], {'max_length': '256'}), 'is_stack': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_sublocation':", "= { 'places.entity': { 'Meta': {'ordering': \"('title',)\", 'object_name': 'Entity'}, '_identifiers':", "('last_updated', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)), )) db.send_create_signal('places', ['Source']) # Adding model 'EntityType'", "'False'}), '_metadata': ('django.db.models.fields.TextField', [], {'default': \"'{}'\"}), 'absolute_url': ('django.db.models.fields.TextField', [], {}),", "'primary_type': ('django.db.models.fields.related.ForeignKey', [], {'to': \"orm['places.EntityType']\", 'null': 'True'}), 'source': ('django.db.models.fields.related.ForeignKey', [],", "'to': \"orm['places.EntityType']\"}), 'verbose_name': ('django.db.models.fields.TextField', [], {}), 'verbose_name_plural': ('django.db.models.fields.TextField', [], {})", "'256'}) }, 'places.source': { 'Meta': {'object_name': 'Source'}, 'id': ('django.db.models.fields.AutoField', [],", "[], {'symmetrical': 'False', 'related_name': \"'entities_completion'\", 'blank': 'True', 'to': \"orm['places.EntityType']\"}), 'geometry':", "'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'note': ('django.db.models.fields.TextField', [], {'null': 'True'}),", "models.ForeignKey(orm['places.identifier'], null=False)) )) db.create_unique('places_entity__identifiers', ['entity_id', 'identifier_id']) def backwards(self, orm): #", "null=False)) )) db.create_unique('places_entitytype_subtype_of_completion', ['from_entitytype_id', 'to_entitytype_id']) # Adding model 'Identifier' db.create_table('places_identifier',", "for field all_types on 'Entity' db.create_table('places_entity_all_types', ( ('id', models.AutoField(verbose_name='ID', primary_key=True,", "'EntityType' db.delete_table('places_entitytype') # Removing M2M table for field subtype_of on", "# Adding model 'EntityType' db.create_table('places_entitytype', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('slug', self.gf('django.db.models.fields.SlugField')(max_length=50,", "subtype_of_completion on 'EntityType' db.delete_table('places_entitytype_subtype_of_completion') # Deleting model 'Identifier' db.delete_table('places_identifier') #", "[], {}), 'verbose_name_plural': ('django.db.models.fields.TextField', [], {}) }, 'places.identifier': { 'Meta':", "Deleting model 
'Entity' db.delete_table('places_entity') # Removing M2M table for field", "'True'}), 'primary_type': ('django.db.models.fields.related.ForeignKey', [], {'to': \"orm['places.EntityType']\", 'null': 'True'}), 'source': ('django.db.models.fields.related.ForeignKey',", "model 'Entity' db.delete_table('places_entity') # Removing M2M table for field all_types", "on 'EntityType' db.create_table('places_entitytype_subtype_of', ( ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), ('from_entitytype', models.ForeignKey(orm['places.entitytype'],", "{'object_name': 'Identifier'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'scheme': ('django.db.models.fields.CharField', [],", "M2M table for field all_types on 'Entity' db.delete_table('places_entity_all_types') # Removing", "self.gf('django.db.models.fields.CharField')(max_length=128)), ('name', self.gf('django.db.models.fields.CharField')(max_length=128)), ('last_updated', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)), )) db.send_create_signal('places', ['Source']) #", "SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm):", "('django.db.models.fields.related.ForeignKey', [], {'to': \"orm['places.Source']\"}), 'title': ('django.db.models.fields.TextField', [], {'blank': 'True'}) },", "<gh_stars>1-10 # encoding: utf-8 import datetime from south.db import db", "table for field all_types_completion on 'Entity' db.delete_table('places_entity_all_types_completion') # Removing M2M", "'Source' db.delete_table('places_source') # Deleting model 'EntityType' db.delete_table('places_entitytype') # Removing M2M", "['Entity']) # Adding M2M table for field all_types on 'Entity'", "('django.db.models.fields.TextField', [], {'blank': 'True'}) }, 'places.entitytype': { 'Meta': {'ordering': \"('verbose_name',)\",", "['Source']) # Adding model 'EntityType' db.create_table('places_entitytype', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('slug',", "'blank': 'True', 'to': \"orm['places.EntityType']\"}), 'geometry': ('django.contrib.gis.db.models.fields.GeometryField', [], {'null': 'True'}), 'id':", "('source', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['places.Source'])), ('primary_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['places.EntityType'], null=True)), ('location', self.gf('django.contrib.gis.db.models.fields.PointField')(null=True)), ('geometry', self.gf('django.contrib.gis.db.models.fields.GeometryField')(null=True)), ('_metadata',", "Removing M2M table for field subtype_of on 'EntityType' db.delete_table('places_entitytype_subtype_of') #", "[], {'symmetrical': 'False', 'related_name': \"'entities'\", 'blank': 'True', 'to': \"orm['places.EntityType']\"}), 'all_types_completion':", "import datetime from south.db import db from south.v2 import SchemaMigration", "\"('title',)\", 'object_name': 'Entity'}, '_identifiers': ('django.db.models.fields.related.ManyToManyField', [], {'to': \"orm['places.Identifier']\", 'symmetrical': 'False'}),", "M2M table for field _identifiers on 'Entity' db.create_table('places_entity__identifiers', ( ('id',", "self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('scheme', self.gf('django.db.models.fields.CharField')(max_length=32)), ('value', self.gf('django.db.models.fields.CharField')(max_length=256)), )) db.send_create_signal('places', ['Identifier']) # Adding", "{}) }, 'places.identifier': { 'Meta': 
{'object_name': 'Identifier'}, 'id': ('django.db.models.fields.AutoField', [],", "'to_entitytype_id']) # Adding model 'Identifier' db.create_table('places_identifier', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('scheme',", "\"orm['places.Source']\"}), 'title': ('django.db.models.fields.TextField', [], {'blank': 'True'}) }, 'places.entitytype': { 'Meta':", "table for field _identifiers on 'Entity' db.create_table('places_entity__identifiers', ( ('id', models.AutoField(verbose_name='ID',", "'all_types_completion': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': \"'entities_completion'\", 'blank': 'True', 'to':", "{'to': \"orm['places.Source']\"}), 'title': ('django.db.models.fields.TextField', [], {'blank': 'True'}) }, 'places.entitytype': {", "models.ForeignKey(orm['places.entity'], null=False)), ('identifier', models.ForeignKey(orm['places.identifier'], null=False)) )) db.create_unique('places_entity__identifiers', ['entity_id', 'identifier_id']) def", "'True', 'to': \"orm['places.EntityType']\"}), 'geometry': ('django.contrib.gis.db.models.fields.GeometryField', [], {'null': 'True'}), 'id': ('django.db.models.fields.AutoField',", "{'null': 'True'}), 'show_in_category_list': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'show_in_nearby_list': ('django.db.models.fields.BooleanField', [],", "('name', self.gf('django.db.models.fields.CharField')(max_length=128)), ('last_updated', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)), )) db.send_create_signal('places', ['Source']) # Adding", "{'blank': 'True'}) }, 'places.entitytype': { 'Meta': {'ordering': \"('verbose_name',)\", 'object_name': 'EntityType'},", "Migration(SchemaMigration): def forwards(self, orm): # Adding model 'Source' db.create_table('places_source', (", "'blank': 'True', 'to': \"orm['places.EntityType']\"}), 'verbose_name': ('django.db.models.fields.TextField', [], {}), 'verbose_name_plural': ('django.db.models.fields.TextField',", "db.create_unique('places_entity_all_types_completion', ['entity_id', 'entitytype_id']) # Adding M2M table for field _identifiers", "('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_sublocation': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'location':", "{ 'Meta': {'ordering': \"('title',)\", 'object_name': 'Entity'}, '_identifiers': ('django.db.models.fields.related.ManyToManyField', [], {'to':", "('django.db.models.fields.CharField', [], {'max_length': '2'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'note':", "('module_name', self.gf('django.db.models.fields.CharField')(max_length=128)), ('name', self.gf('django.db.models.fields.CharField')(max_length=128)), ('last_updated', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)), )) db.send_create_signal('places', ['Source'])", "'True'}) }, 'places.entitytype': { 'Meta': {'ordering': \"('verbose_name',)\", 'object_name': 'EntityType'}, 'article':", "[], {'symmetrical': 'False', 'related_name': \"'subtypes_completion'\", 'blank': 'True', 'to': \"orm['places.EntityType']\"}), 'verbose_name':", "('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('title', self.gf('django.db.models.fields.TextField')(blank=True)), ('source', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['places.Source'])), ('primary_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['places.EntityType'], 
null=True)), ('location',", "db.create_unique('places_entitytype_subtype_of', ['from_entitytype_id', 'to_entitytype_id']) # Adding M2M table for field subtype_of_completion", "'identifier_scheme': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'identifier_value': ('django.db.models.fields.CharField', [], {'max_length': '256'}),", "'True'}), 'identifier_scheme': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'identifier_value': ('django.db.models.fields.CharField', [], {'max_length':", "self.gf('django.db.models.fields.CharField')(max_length=256)), )) db.send_create_signal('places', ['Identifier']) # Adding model 'Entity' db.create_table('places_entity', (", "[], {'symmetrical': 'False', 'related_name': \"'subtypes'\", 'blank': 'True', 'to': \"orm['places.EntityType']\"}), 'subtype_of_completion':", "'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}) } } complete_apps = ['places']", "('from_entitytype', models.ForeignKey(orm['places.entitytype'], null=False)), ('to_entitytype', models.ForeignKey(orm['places.entitytype'], null=False)) )) db.create_unique('places_entitytype_subtype_of', ['from_entitytype_id', 'to_entitytype_id'])", "self.gf('django.db.models.fields.related.ForeignKey')(to=orm['places.EntityType'], null=True)), ('location', self.gf('django.contrib.gis.db.models.fields.PointField')(null=True)), ('geometry', self.gf('django.contrib.gis.db.models.fields.GeometryField')(null=True)), ('_metadata', self.gf('django.db.models.fields.TextField')(default='{}')), ('absolute_url', self.gf('django.db.models.fields.TextField')()),", "[], {'max_length': '32'}), 'identifier_value': ('django.db.models.fields.CharField', [], {'max_length': '256'}), 'is_stack': ('django.db.models.fields.BooleanField',", "self.gf('django.db.models.fields.BooleanField')(default=False)), ('identifier_scheme', self.gf('django.db.models.fields.CharField')(max_length=32)), ('identifier_value', self.gf('django.db.models.fields.CharField')(max_length=256)), )) db.send_create_signal('places', ['Entity']) # Adding", "\"orm['places.Identifier']\", 'symmetrical': 'False'}), '_metadata': ('django.db.models.fields.TextField', [], {'default': \"'{}'\"}), 'absolute_url': ('django.db.models.fields.TextField',", "( ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), ('from_entitytype', models.ForeignKey(orm['places.entitytype'], null=False)), ('to_entitytype', models.ForeignKey(orm['places.entitytype'],", "field _identifiers on 'Entity' db.delete_table('places_entity__identifiers') models = { 'places.entity': {", "\"'subtypes_completion'\", 'blank': 'True', 'to': \"orm['places.EntityType']\"}), 'verbose_name': ('django.db.models.fields.TextField', [], {}), 'verbose_name_plural':", "[], {'max_length': '128'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}) } }", "class Migration(SchemaMigration): def forwards(self, orm): # Adding model 'Source' db.create_table('places_source',", "db.create_table('places_entitytype_subtype_of', ( ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), ('from_entitytype', models.ForeignKey(orm['places.entitytype'], null=False)), ('to_entitytype',", "'all_types': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': \"'entities'\", 'blank': 'True', 'to':", "{'to': \"orm['places.Identifier']\", 'symmetrical': 'False'}), '_metadata': ('django.db.models.fields.TextField', [], {'default': \"'{}'\"}), 'absolute_url':", 
"self.gf('django.db.models.fields.BooleanField')(default=False)), ('show_in_category_list', self.gf('django.db.models.fields.BooleanField')(default=False)), ('note', self.gf('django.db.models.fields.TextField')(null=True)), )) db.send_create_signal('places', ['EntityType']) # Adding", "# Adding model 'Identifier' db.create_table('places_identifier', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('scheme', self.gf('django.db.models.fields.CharField')(max_length=32)),", "'blank': 'True'}), 'module_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'name': ('django.db.models.fields.CharField', [],", "'Entity'}, '_identifiers': ('django.db.models.fields.related.ManyToManyField', [], {'to': \"orm['places.Identifier']\", 'symmetrical': 'False'}), '_metadata': ('django.db.models.fields.TextField',", "['entity_id', 'entitytype_id']) # Adding M2M table for field _identifiers on", "self.gf('django.db.models.fields.BooleanField')(default=False)), ('note', self.gf('django.db.models.fields.TextField')(null=True)), )) db.send_create_signal('places', ['EntityType']) # Adding M2M table", "[], {'max_length': '2'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'note': ('django.db.models.fields.TextField',", "table for field subtype_of_completion on 'EntityType' db.create_table('places_entitytype_subtype_of_completion', ( ('id', models.AutoField(verbose_name='ID',", "('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'show_in_nearby_list': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'slug':", "Adding M2M table for field subtype_of on 'EntityType' db.create_table('places_entitytype_subtype_of', (", "null=False)), ('to_entitytype', models.ForeignKey(orm['places.entitytype'], null=False)) )) db.create_unique('places_entitytype_subtype_of_completion', ['from_entitytype_id', 'to_entitytype_id']) # Adding", "db.delete_table('places_identifier') # Deleting model 'Entity' db.delete_table('places_entity') # Removing M2M table", "# Deleting model 'Identifier' db.delete_table('places_identifier') # Deleting model 'Entity' db.delete_table('places_entity')", "('verbose_name_plural', self.gf('django.db.models.fields.TextField')()), ('show_in_nearby_list', self.gf('django.db.models.fields.BooleanField')(default=False)), ('show_in_category_list', self.gf('django.db.models.fields.BooleanField')(default=False)), ('note', self.gf('django.db.models.fields.TextField')(null=True)), )) db.send_create_signal('places',", "{'null': 'True'}), 'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': \"orm['places.Entity']\", 'null': 'True'}), 'primary_type':", "models.ForeignKey(orm['places.entitytype'], null=False)) )) db.create_unique('places_entity_all_types_completion', ['entity_id', 'entitytype_id']) # Adding M2M table", "'True'}), 'subtype_of': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': \"'subtypes'\", 'blank': 'True',", "('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), ('from_entitytype', models.ForeignKey(orm['places.entitytype'], null=False)), ('to_entitytype', models.ForeignKey(orm['places.entitytype'], null=False))", "self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('slug', self.gf('django.db.models.fields.SlugField')(max_length=50, db_index=True)), ('article', self.gf('django.db.models.fields.CharField')(max_length=2)), ('verbose_name', self.gf('django.db.models.fields.TextField')()), 
('verbose_name_plural', self.gf('django.db.models.fields.TextField')()),", "M2M table for field subtype_of on 'EntityType' db.delete_table('places_entitytype_subtype_of') # Removing", "{'ordering': \"('verbose_name',)\", 'object_name': 'EntityType'}, 'article': ('django.db.models.fields.CharField', [], {'max_length': '2'}), 'id':", "('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('scheme', self.gf('django.db.models.fields.CharField')(max_length=32)), ('value', self.gf('django.db.models.fields.CharField')(max_length=256)), )) db.send_create_signal('places', ['Identifier']) #", ")) db.send_create_signal('places', ['Entity']) # Adding M2M table for field all_types", "# Removing M2M table for field all_types on 'Entity' db.delete_table('places_entity_all_types')", "'Identifier' db.create_table('places_identifier', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('scheme', self.gf('django.db.models.fields.CharField')(max_length=32)), ('value', self.gf('django.db.models.fields.CharField')(max_length=256)), ))", "'show_in_category_list': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'show_in_nearby_list': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),", "'True', 'to': \"orm['places.EntityType']\"}), 'verbose_name': ('django.db.models.fields.TextField', [], {}), 'verbose_name_plural': ('django.db.models.fields.TextField', [],", "db.send_create_signal('places', ['Identifier']) # Adding model 'Entity' db.create_table('places_entity', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),", "'EntityType'}, 'article': ('django.db.models.fields.CharField', [], {'max_length': '2'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key':", "\"orm['places.EntityType']\"}), 'verbose_name': ('django.db.models.fields.TextField', [], {}), 'verbose_name_plural': ('django.db.models.fields.TextField', [], {}) },", "self.gf('django.contrib.gis.db.models.fields.PointField')(null=True)), ('geometry', self.gf('django.contrib.gis.db.models.fields.GeometryField')(null=True)), ('_metadata', self.gf('django.db.models.fields.TextField')(default='{}')), ('absolute_url', self.gf('django.db.models.fields.TextField')()), ('parent', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['places.Entity'], null=True)),", "table for field subtype_of on 'EntityType' db.create_table('places_entitytype_subtype_of', ( ('id', models.AutoField(verbose_name='ID',", "null=False)), ('entitytype', models.ForeignKey(orm['places.entitytype'], null=False)) )) db.create_unique('places_entity_all_types_completion', ['entity_id', 'entitytype_id']) # Adding", "'to_entitytype_id']) # Adding M2M table for field subtype_of_completion on 'EntityType'", "('from_entitytype', models.ForeignKey(orm['places.entitytype'], null=False)), ('to_entitytype', models.ForeignKey(orm['places.entitytype'], null=False)) )) db.create_unique('places_entitytype_subtype_of_completion', ['from_entitytype_id', 'to_entitytype_id'])", "auto_created=True)), ('from_entitytype', models.ForeignKey(orm['places.entitytype'], null=False)), ('to_entitytype', models.ForeignKey(orm['places.entitytype'], null=False)) )) db.create_unique('places_entitytype_subtype_of', ['from_entitytype_id',", "table for field all_types on 'Entity' db.delete_table('places_entity_all_types') # Removing M2M", "'Meta': {'ordering': \"('verbose_name',)\", 'object_name': 'EntityType'}, 'article': ('django.db.models.fields.CharField', [], {'max_length': '2'}),", 
"('identifier_value', self.gf('django.db.models.fields.CharField')(max_length=256)), )) db.send_create_signal('places', ['Entity']) # Adding M2M table for", "{'ordering': \"('title',)\", 'object_name': 'Entity'}, '_identifiers': ('django.db.models.fields.related.ManyToManyField', [], {'to': \"orm['places.Identifier']\", 'symmetrical':", "model 'Entity' db.create_table('places_entity', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('title', self.gf('django.db.models.fields.TextField')(blank=True)), ('source', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['places.Source'])),", "('is_sublocation', self.gf('django.db.models.fields.BooleanField')(default=False)), ('is_stack', self.gf('django.db.models.fields.BooleanField')(default=False)), ('identifier_scheme', self.gf('django.db.models.fields.CharField')(max_length=32)), ('identifier_value', self.gf('django.db.models.fields.CharField')(max_length=256)), )) db.send_create_signal('places',", "('django.db.models.fields.CharField', [], {'max_length': '32'}), 'identifier_value': ('django.db.models.fields.CharField', [], {'max_length': '256'}), 'is_stack':", "'source': ('django.db.models.fields.related.ForeignKey', [], {'to': \"orm['places.Source']\"}), 'title': ('django.db.models.fields.TextField', [], {'blank': 'True'})", "models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), ('entity', models.ForeignKey(orm['places.entity'], null=False)), ('entitytype', models.ForeignKey(orm['places.entitytype'], null=False)) ))", "'value': ('django.db.models.fields.CharField', [], {'max_length': '256'}) }, 'places.source': { 'Meta': {'object_name':", "south.db import db from south.v2 import SchemaMigration from django.db import", "[], {'primary_key': 'True'}), 'scheme': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'value': ('django.db.models.fields.CharField',", "'True'}), 'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': \"orm['places.Entity']\", 'null': 'True'}), 'primary_type': ('django.db.models.fields.related.ForeignKey',", "'entitytype_id']) # Adding M2M table for field _identifiers on 'Entity'", "model 'Identifier' db.create_table('places_identifier', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('scheme', self.gf('django.db.models.fields.CharField')(max_length=32)), ('value', self.gf('django.db.models.fields.CharField')(max_length=256)),", "{'max_length': '50', 'db_index': 'True'}), 'subtype_of': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name':", "'True', 'to': \"orm['places.EntityType']\"}), 'all_types_completion': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': \"'entities_completion'\",", "db from south.v2 import SchemaMigration from django.db import models class", "on 'Entity' db.create_table('places_entity_all_types', ( ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), ('entity', models.ForeignKey(orm['places.entity'],", "self.gf('django.db.models.fields.CharField')(max_length=32)), ('value', self.gf('django.db.models.fields.CharField')(max_length=256)), )) db.send_create_signal('places', ['Identifier']) # Adding model 'Entity'", "'geometry': ('django.contrib.gis.db.models.fields.GeometryField', [], {'null': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),", "{ 'Meta': {'object_name': 'Source'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 
'True'}), 'last_updated':", "from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration):", "M2M table for field all_types_completion on 'Entity' db.create_table('places_entity_all_types_completion', ( ('id',", "for field subtype_of_completion on 'EntityType' db.create_table('places_entitytype_subtype_of_completion', ( ('id', models.AutoField(verbose_name='ID', primary_key=True,", "'EntityType' db.create_table('places_entitytype', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('slug', self.gf('django.db.models.fields.SlugField')(max_length=50, db_index=True)), ('article', self.gf('django.db.models.fields.CharField')(max_length=2)),", "Removing M2M table for field all_types_completion on 'Entity' db.delete_table('places_entity_all_types_completion') #", "'Entity' db.create_table('places_entity__identifiers', ( ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), ('entity', models.ForeignKey(orm['places.entity'], null=False)),", "models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), ('entity', models.ForeignKey(orm['places.entity'], null=False)), ('identifier', models.ForeignKey(orm['places.identifier'], null=False)) ))", "models.ForeignKey(orm['places.entitytype'], null=False)) )) db.create_unique('places_entity_all_types', ['entity_id', 'entitytype_id']) # Adding M2M table", "Deleting model 'Identifier' db.delete_table('places_identifier') # Deleting model 'Entity' db.delete_table('places_entity') #", "'related_name': \"'subtypes'\", 'blank': 'True', 'to': \"orm['places.EntityType']\"}), 'subtype_of_completion': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical':", "self.gf('django.db.models.fields.TextField')()), ('verbose_name_plural', self.gf('django.db.models.fields.TextField')()), ('show_in_nearby_list', self.gf('django.db.models.fields.BooleanField')(default=False)), ('show_in_category_list', self.gf('django.db.models.fields.BooleanField')(default=False)), ('note', self.gf('django.db.models.fields.TextField')(null=True)), ))", "'show_in_nearby_list': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50',", "for field all_types_completion on 'Entity' db.create_table('places_entity_all_types_completion', ( ('id', models.AutoField(verbose_name='ID', primary_key=True,", "'subtype_of': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': \"'subtypes'\", 'blank': 'True', 'to':", "'Entity' db.create_table('places_entity', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('title', self.gf('django.db.models.fields.TextField')(blank=True)), ('source', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['places.Source'])), ('primary_type',", "('django.db.models.fields.TextField', [], {'default': \"'{}'\"}), 'absolute_url': ('django.db.models.fields.TextField', [], {}), 'all_types': ('django.db.models.fields.related.ManyToManyField',", "[], {'default': 'False'}), 'show_in_nearby_list': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'slug': ('django.db.models.fields.SlugField',", "\"'subtypes'\", 'blank': 'True', 'to': \"orm['places.EntityType']\"}), 'subtype_of_completion': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False',", "db.create_unique('places_entity__identifiers', ['entity_id', 'identifier_id']) def backwards(self, orm): # Deleting model 
'Source'", "[], {'null': 'True'}), 'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': \"orm['places.Entity']\", 'null': 'True'}),", "'32'}), 'value': ('django.db.models.fields.CharField', [], {'max_length': '256'}) }, 'places.source': { 'Meta':", "table for field subtype_of on 'EntityType' db.delete_table('places_entitytype_subtype_of') # Removing M2M", "('show_in_nearby_list', self.gf('django.db.models.fields.BooleanField')(default=False)), ('show_in_category_list', self.gf('django.db.models.fields.BooleanField')(default=False)), ('note', self.gf('django.db.models.fields.TextField')(null=True)), )) db.send_create_signal('places', ['EntityType']) #", "south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def", "'32'}), 'identifier_value': ('django.db.models.fields.CharField', [], {'max_length': '256'}), 'is_stack': ('django.db.models.fields.BooleanField', [], {'default':", "( ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), ('entity', models.ForeignKey(orm['places.entity'], null=False)), ('entitytype', models.ForeignKey(orm['places.entitytype'],", "Deleting model 'EntityType' db.delete_table('places_entitytype') # Removing M2M table for field", "db.create_table('places_identifier', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('scheme', self.gf('django.db.models.fields.CharField')(max_length=32)), ('value', self.gf('django.db.models.fields.CharField')(max_length=256)), )) db.send_create_signal('places',", "('to_entitytype', models.ForeignKey(orm['places.entitytype'], null=False)) )) db.create_unique('places_entitytype_subtype_of_completion', ['from_entitytype_id', 'to_entitytype_id']) # Adding model", "Adding model 'EntityType' db.create_table('places_entitytype', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('slug', self.gf('django.db.models.fields.SlugField')(max_length=50, db_index=True)),", "['entity_id', 'entitytype_id']) # Adding M2M table for field all_types_completion on", "[], {'null': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'identifier_scheme': ('django.db.models.fields.CharField',", "db.delete_table('places_entitytype') # Removing M2M table for field subtype_of on 'EntityType'", "{'max_length': '128'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}) } } complete_apps", "['EntityType']) # Adding M2M table for field subtype_of on 'EntityType'", ")) db.create_unique('places_entitytype_subtype_of_completion', ['from_entitytype_id', 'to_entitytype_id']) # Adding model 'Identifier' db.create_table('places_identifier', (", "('title', self.gf('django.db.models.fields.TextField')(blank=True)), ('source', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['places.Source'])), ('primary_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['places.EntityType'], null=True)), ('location', self.gf('django.contrib.gis.db.models.fields.PointField')(null=True)), ('geometry',", "all_types on 'Entity' db.delete_table('places_entity_all_types') # Removing M2M table for field", "'True'}), 'note': ('django.db.models.fields.TextField', [], {'null': 'True'}), 'show_in_category_list': ('django.db.models.fields.BooleanField', [], {'default':", "'False', 'related_name': \"'subtypes'\", 'blank': 'True', 'to': \"orm['places.EntityType']\"}), 'subtype_of_completion': ('django.db.models.fields.related.ManyToManyField', [],", "('django.db.models.fields.BooleanField', [], 
{'default': 'False'}), 'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index':", "{'primary_key': 'True'}), 'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'module_name':", "('django.db.models.fields.CharField', [], {'max_length': '256'}) }, 'places.source': { 'Meta': {'object_name': 'Source'},", "('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('module_name', self.gf('django.db.models.fields.CharField')(max_length=128)), ('name', self.gf('django.db.models.fields.CharField')(max_length=128)), ('last_updated', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)), ))", "('geometry', self.gf('django.contrib.gis.db.models.fields.GeometryField')(null=True)), ('_metadata', self.gf('django.db.models.fields.TextField')(default='{}')), ('absolute_url', self.gf('django.db.models.fields.TextField')()), ('parent', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['places.Entity'], null=True)), ('is_sublocation',", ")) db.send_create_signal('places', ['Identifier']) # Adding model 'Entity' db.create_table('places_entity', ( ('id',", "'entitytype_id']) # Adding M2M table for field all_types_completion on 'Entity'", "'EntityType' db.delete_table('places_entitytype_subtype_of') # Removing M2M table for field subtype_of_completion on", "[], {'to': \"orm['places.Source']\"}), 'title': ('django.db.models.fields.TextField', [], {'blank': 'True'}) }, 'places.entitytype':", "'verbose_name_plural': ('django.db.models.fields.TextField', [], {}) }, 'places.identifier': { 'Meta': {'object_name': 'Identifier'},", "for field _identifiers on 'Entity' db.delete_table('places_entity__identifiers') models = { 'places.entity':", "'False'}), 'location': ('django.contrib.gis.db.models.fields.PointField', [], {'null': 'True'}), 'parent': ('django.db.models.fields.related.ForeignKey', [], {'to':", "Adding M2M table for field all_types on 'Entity' db.create_table('places_entity_all_types', (", "('django.db.models.fields.related.ManyToManyField', [], {'to': \"orm['places.Identifier']\", 'symmetrical': 'False'}), '_metadata': ('django.db.models.fields.TextField', [], {'default':", "self.gf('django.db.models.fields.CharField')(max_length=128)), ('last_updated', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)), )) db.send_create_signal('places', ['Source']) # Adding model", "{'default': 'False'}), 'location': ('django.contrib.gis.db.models.fields.PointField', [], {'null': 'True'}), 'parent': ('django.db.models.fields.related.ForeignKey', [],", "'True', 'blank': 'True'}), 'module_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'name': ('django.db.models.fields.CharField',", "['entity_id', 'identifier_id']) def backwards(self, orm): # Deleting model 'Source' db.delete_table('places_source')", "[], {}) }, 'places.identifier': { 'Meta': {'object_name': 'Identifier'}, 'id': ('django.db.models.fields.AutoField',", "'identifier_value': ('django.db.models.fields.CharField', [], {'max_length': '256'}), 'is_stack': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),", "auto_created=True)), ('entity', models.ForeignKey(orm['places.entity'], null=False)), ('entitytype', models.ForeignKey(orm['places.entitytype'], null=False)) )) db.create_unique('places_entity_all_types_completion', ['entity_id',", "# Removing M2M table for field all_types_completion on 'Entity' db.delete_table('places_entity_all_types_completion')", "[], {'max_length': 
# encoding: utf-8
from south.db import db
from south.v2 import SchemaMigration
from django.db import models


class Migration(SchemaMigration):

    def forwards(self, orm):
        # Adding model 'Source'
        db.create_table('places_source', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('module_name', self.gf('django.db.models.fields.CharField')(max_length=128)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=128)),
            ('last_updated', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
        ))
        db.send_create_signal('places', ['Source'])

        # Adding model 'EntityType'
        db.create_table('places_entitytype', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('slug', self.gf('django.db.models.fields.SlugField')(max_length=50, db_index=True)),
            ('article', self.gf('django.db.models.fields.CharField')(max_length=2)),
            ('verbose_name', self.gf('django.db.models.fields.TextField')()),
            ('verbose_name_plural', self.gf('django.db.models.fields.TextField')()),
            ('show_in_nearby_list', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('show_in_category_list', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('note', self.gf('django.db.models.fields.TextField')(null=True)),
        ))
        db.send_create_signal('places', ['EntityType'])

        # Adding M2M table for field subtype_of on 'EntityType'
        db.create_table('places_entitytype_subtype_of', (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('from_entitytype', models.ForeignKey(orm['places.entitytype'], null=False)),
            ('to_entitytype', models.ForeignKey(orm['places.entitytype'], null=False))
        ))
        db.create_unique('places_entitytype_subtype_of', ['from_entitytype_id', 'to_entitytype_id'])

        # Adding M2M table for field subtype_of_completion on 'EntityType'
        db.create_table('places_entitytype_subtype_of_completion', (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('from_entitytype', models.ForeignKey(orm['places.entitytype'], null=False)),
            ('to_entitytype', models.ForeignKey(orm['places.entitytype'], null=False))
        ))
        db.create_unique('places_entitytype_subtype_of_completion', ['from_entitytype_id', 'to_entitytype_id'])

        # Adding model 'Identifier'
        db.create_table('places_identifier', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('scheme', self.gf('django.db.models.fields.CharField')(max_length=32)),
            ('value', self.gf('django.db.models.fields.CharField')(max_length=256)),
        ))
        db.send_create_signal('places', ['Identifier'])

        # Adding model 'Entity'
        db.create_table('places_entity', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('title', self.gf('django.db.models.fields.TextField')(blank=True)),
            ('source', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['places.Source'])),
            ('primary_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['places.EntityType'], null=True)),
            ('location', self.gf('django.contrib.gis.db.models.fields.PointField')(null=True)),
            ('geometry', self.gf('django.contrib.gis.db.models.fields.GeometryField')(null=True)),
            ('_metadata', self.gf('django.db.models.fields.TextField')(default='{}')),
            ('absolute_url', self.gf('django.db.models.fields.TextField')()),
            ('parent', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['places.Entity'], null=True)),
            ('is_sublocation', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('is_stack', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('identifier_scheme', self.gf('django.db.models.fields.CharField')(max_length=32)),
            ('identifier_value', self.gf('django.db.models.fields.CharField')(max_length=256)),
        ))
        db.send_create_signal('places', ['Entity'])

        # Adding M2M table for field all_types on 'Entity'
        db.create_table('places_entity_all_types', (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('entity', models.ForeignKey(orm['places.entity'], null=False)),
            ('entitytype', models.ForeignKey(orm['places.entitytype'], null=False))
        ))
        db.create_unique('places_entity_all_types', ['entity_id', 'entitytype_id'])

        # Adding M2M table for field all_types_completion on 'Entity'
        db.create_table('places_entity_all_types_completion', (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('entity', models.ForeignKey(orm['places.entity'], null=False)),
            ('entitytype', models.ForeignKey(orm['places.entitytype'], null=False))
        ))
        db.create_unique('places_entity_all_types_completion', ['entity_id', 'entitytype_id'])

        # Adding M2M table for field _identifiers on 'Entity'
        db.create_table('places_entity__identifiers', (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('entity', models.ForeignKey(orm['places.entity'], null=False)),
            ('identifier', models.ForeignKey(orm['places.identifier'], null=False))
        ))
        db.create_unique('places_entity__identifiers', ['entity_id', 'identifier_id'])

    def backwards(self, orm):
        # Deleting model 'Source'
        db.delete_table('places_source')

        # Deleting model 'EntityType'
        db.delete_table('places_entitytype')

        # Removing M2M table for field subtype_of on 'EntityType'
        db.delete_table('places_entitytype_subtype_of')

        # Removing M2M table for field subtype_of_completion on 'EntityType'
        db.delete_table('places_entitytype_subtype_of_completion')

        # Deleting model 'Identifier'
        db.delete_table('places_identifier')

        # Deleting model 'Entity'
        db.delete_table('places_entity')

        # Removing M2M table for field all_types on 'Entity'
        db.delete_table('places_entity_all_types')

        # Removing M2M table for field all_types_completion on 'Entity'
        db.delete_table('places_entity_all_types_completion')

        # Removing M2M table for field _identifiers on 'Entity'
        db.delete_table('places_entity__identifiers')

    models = {
        'places.entity': {
            'Meta': {'ordering': "('title',)", 'object_name': 'Entity'},
            '_identifiers': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['places.Identifier']", 'symmetrical': 'False'}),
            '_metadata': ('django.db.models.fields.TextField', [], {'default': "'{}'"}),
            'absolute_url': ('django.db.models.fields.TextField', [], {}),
            'all_types': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'entities'", 'blank': 'True', 'to': "orm['places.EntityType']"}),
            'all_types_completion': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'entities_completion'", 'blank': 'True', 'to': "orm['places.EntityType']"}),
            'geometry': ('django.contrib.gis.db.models.fields.GeometryField', [], {'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'identifier_scheme': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'identifier_value': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
            'is_stack': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_sublocation': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'location': ('django.contrib.gis.db.models.fields.PointField', [], {'null': 'True'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['places.Entity']", 'null': 'True'}),
            'primary_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['places.EntityType']", 'null': 'True'}),
            'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['places.Source']"}),
            'title': ('django.db.models.fields.TextField', [], {'blank': 'True'})
        },
        'places.entitytype': {
            'Meta': {'ordering': "('verbose_name',)", 'object_name': 'EntityType'},
            'article': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'note': ('django.db.models.fields.TextField', [], {'null': 'True'}),
            'show_in_category_list': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'show_in_nearby_list': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'}),
            'subtype_of': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'subtypes'", 'blank': 'True', 'to': "orm['places.EntityType']"}),
            'subtype_of_completion': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'subtypes_completion'", 'blank': 'True', 'to': "orm['places.EntityType']"}),
            'verbose_name': ('django.db.models.fields.TextField', [], {}),
            'verbose_name_plural': ('django.db.models.fields.TextField', [], {})
        },
        'places.identifier': {
            'Meta': {'object_name': 'Identifier'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'scheme': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'value': ('django.db.models.fields.CharField', [], {'max_length': '256'})
        },
        'places.source': {
            'Meta': {'object_name': 'Source'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'module_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
        }
    }

    complete_apps = ['places']
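For orientation, the Django models that would freeze to the migration's models dict above look roughly like the following. This is a minimal sketch reconstructed from the frozen ORM state, not the app's actual models.py: field names and options come from the dict, while the GeoDjango base class, the self-referential M2M wiring, and the related_name for subtype_of_completion are assumptions.

# Hypothetical places/models.py matching the frozen ORM state above.
# Reconstructed from the migration's `models` dict; anything not in that
# dict is an assumption.
from django.contrib.gis.db import models


class Source(models.Model):
    module_name = models.CharField(max_length=128)
    name = models.CharField(max_length=128)
    last_updated = models.DateTimeField(auto_now=True, blank=True)


class EntityType(models.Model):
    slug = models.SlugField()
    article = models.CharField(max_length=2)
    verbose_name = models.TextField()
    verbose_name_plural = models.TextField()
    show_in_nearby_list = models.BooleanField(default=False)
    show_in_category_list = models.BooleanField(default=False)
    note = models.TextField(null=True)
    subtype_of = models.ManyToManyField('self', blank=True, symmetrical=False,
                                        related_name='subtypes')
    subtype_of_completion = models.ManyToManyField('self', blank=True, symmetrical=False,
                                                   related_name='subtypes_completion')

    class Meta:
        ordering = ('verbose_name',)


class Identifier(models.Model):
    scheme = models.CharField(max_length=32)
    value = models.CharField(max_length=256)


class Entity(models.Model):
    title = models.TextField(blank=True)
    source = models.ForeignKey(Source)
    primary_type = models.ForeignKey(EntityType, null=True)
    all_types = models.ManyToManyField(EntityType, blank=True, related_name='entities')
    all_types_completion = models.ManyToManyField(EntityType, blank=True,
                                                  related_name='entities_completion')
    location = models.PointField(null=True)
    geometry = models.GeometryField(null=True)
    _metadata = models.TextField(default='{}')
    absolute_url = models.TextField()
    parent = models.ForeignKey('self', null=True)
    is_sublocation = models.BooleanField(default=False)
    is_stack = models.BooleanField(default=False)
    identifier_scheme = models.CharField(max_length=32)
    identifier_value = models.CharField(max_length=256)
    _identifiers = models.ManyToManyField(Identifier)

    class Meta:
        ordering = ('title',)

With South installed, running ./manage.py migrate places would then apply the forwards() method above and create these tables.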
[ "accessed_at) if auto_delete_on_idle and not isinstance(auto_delete_on_idle, str): raise TypeError(\"Expected argument", "dead_lettering_on_message_expiration) if default_message_time_to_live and not isinstance(default_message_time_to_live, str): raise TypeError(\"Expected argument", "__args__['resourceGroupName'] = resource_group_name __args__['subscriptionName'] = subscription_name __args__['topicName'] = topic_name if", "to forward the messages \"\"\" return pulumi.get(self, \"forward_to\") @property @pulumi.getter", "raise TypeError(\"Expected argument 'count_details' to be a dict\") pulumi.set(__self__, \"count_details\",", "argument 'message_count' to be a float\") pulumi.set(__self__, \"message_count\", message_count) if", "isinstance(id, str): raise TypeError(\"Expected argument 'id' to be a str\")", "a dict\") pulumi.set(__self__, \"count_details\", count_details) if created_at and not isinstance(created_at,", "used when TimeToLive is not set on a message itself.", "subscription has an affinity to the client id. \"\"\" return", "10 minutes. \"\"\" return pulumi.get(self, \"duplicate_detection_history_time_window\") @property @pulumi.getter(name=\"enableBatchedOperations\") def enable_batched_operations(self)", "when TimeToLive is not set on a message itself. \"\"\"", "max_delivery_count=None, message_count=None, name=None, requires_session=None, status=None, system_data=None, type=None, updated_at=None): if accessed_at", "Service Bus. This is the default value used when TimeToLive", "\"system_data\") @property @pulumi.getter def type(self) -> str: \"\"\" Resource type", "def message_count(self) -> float: \"\"\" Number of messages. \"\"\" return", "default value is 1 minute. \"\"\" return pulumi.get(self, \"lock_duration\") @property", "if name and not isinstance(name, str): raise TypeError(\"Expected argument 'name'", "the messages \"\"\" return pulumi.get(self, \"forward_to\") @property @pulumi.getter def id(self)", "def lock_duration(self) -> Optional[str]: \"\"\" ISO 8061 lock duration timespan", "to be a str\") pulumi.set(__self__, \"status\", status) if system_data and", "not isinstance(forward_dead_lettered_messages_to, str): raise TypeError(\"Expected argument 'forward_dead_lettered_messages_to' to be a", "= resource_group_name __args__['subscriptionName'] = subscription_name __args__['topicName'] = topic_name if opts", "TypeError(\"Expected argument 'forward_dead_lettered_messages_to' to be a str\") pulumi.set(__self__, \"forward_dead_lettered_messages_to\", forward_dead_lettered_messages_to)", "and not isinstance(accessed_at, str): raise TypeError(\"Expected argument 'accessed_at' to be", "8061 lock duration timespan for the subscription. The default value", "pulumi.set(__self__, \"id\", id) if is_client_affine and not isinstance(is_client_affine, bool): raise", "a subscription has dead letter support on filter evaluation exceptions.", "None: opts = pulumi.InvokeOptions() if opts.version is None: opts.version =", "is the duration after which the message expires, starting from", "subscription name. :param str topic_name: The topic name. \"\"\" __args__", "Default message timespan to live value. This is the duration", "@property @pulumi.getter(name=\"forwardDeadLetteredMessagesTo\") def forward_dead_lettered_messages_to(self) -> Optional[str]: \"\"\" Queue/Topic name to", "subscription. :param str subscription_name: The subscription name. 
:param str topic_name:", "raise TypeError(\"Expected argument 'default_message_time_to_live' to be a str\") pulumi.set(__self__, \"default_message_time_to_live\",", "bool): raise TypeError(\"Expected argument 'dead_lettering_on_message_expiration' to be a bool\") pulumi.set(__self__,", "'client_affine_properties' to be a dict\") pulumi.set(__self__, \"client_affine_properties\", client_affine_properties) if count_details", "Union, overload from ... import _utilities from . import outputs", "if dead_lettering_on_message_expiration and not isinstance(dead_lettering_on_message_expiration, bool): raise TypeError(\"Expected argument 'dead_lettering_on_message_expiration'", "subscription. \"\"\" return pulumi.get(self, \"accessed_at\") @property @pulumi.getter(name=\"autoDeleteOnIdle\") def auto_delete_on_idle(self) ->", "= dict() __args__['namespaceName'] = namespace_name __args__['resourceGroupName'] = resource_group_name __args__['subscriptionName'] =", "this file was generated by the Pulumi SDK Generator. ***", "created_at and not isinstance(created_at, str): raise TypeError(\"Expected argument 'created_at' to", "timespan to live value. This is the duration after which", "-> Optional[str]: \"\"\" ISO 8601 timeSpan structure that defines the", "dead_lettering_on_filter_evaluation_exceptions) if dead_lettering_on_message_expiration and not isinstance(dead_lettering_on_message_expiration, bool): raise TypeError(\"Expected argument", "@property @pulumi.getter def status(self) -> Optional[str]: \"\"\" Enumerates the possible", "float\") pulumi.set(__self__, \"message_count\", message_count) if name and not isinstance(name, str):", "argument 'default_message_time_to_live' to be a str\") pulumi.set(__self__, \"default_message_time_to_live\", default_message_time_to_live) if", "message timespan to live value. This is the duration after", "if opts is None: opts = pulumi.InvokeOptions() if opts.version is", "message itself. \"\"\" return pulumi.get(self, \"default_message_time_to_live\") @property @pulumi.getter(name=\"duplicateDetectionHistoryTimeWindow\") def duplicate_detection_history_time_window(self)", "be a dict\") pulumi.set(__self__, \"client_affine_properties\", client_affine_properties) if count_details and not", "the Dead Letter message \"\"\" return pulumi.get(self, \"forward_dead_lettered_messages_to\") @property @pulumi.getter(name=\"forwardTo\")", "= None, opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSubscriptionResult: \"\"\" Description", "\"\"\" Exact time the message was created. \"\"\" return pulumi.get(self,", "time there was a receive request to this subscription. \"\"\"", "created. 
\"\"\" return pulumi.get(self, \"created_at\") @property @pulumi.getter(name=\"deadLetteringOnFilterEvaluationExceptions\") def dead_lettering_on_filter_evaluation_exceptions(self) ->", "return AwaitableGetSubscriptionResult( accessed_at=__ret__.accessed_at, auto_delete_on_idle=__ret__.auto_delete_on_idle, client_affine_properties=__ret__.client_affine_properties, count_details=__ret__.count_details, created_at=__ret__.created_at, dead_lettering_on_filter_evaluation_exceptions=__ret__.dead_lettering_on_filter_evaluation_exceptions, dead_lettering_on_message_expiration=__ret__.dead_lettering_on_message_expiration, default_message_time_to_live=__ret__.default_message_time_to_live,", "to be a dict\") pulumi.set(__self__, \"count_details\", count_details) if created_at and", "\"auto_delete_on_idle\", auto_delete_on_idle) if client_affine_properties and not isinstance(client_affine_properties, dict): raise TypeError(\"Expected", "type and not isinstance(type, str): raise TypeError(\"Expected argument 'type' to", "@pulumi.getter(name=\"countDetails\") def count_details(self) -> 'outputs.MessageCountDetailsResponse': \"\"\" Message count details \"\"\"", "is_client_affine) if lock_duration and not isinstance(lock_duration, str): raise TypeError(\"Expected argument", "enable_batched_operations and not isinstance(enable_batched_operations, bool): raise TypeError(\"Expected argument 'enable_batched_operations' to", "which the message expires, starting from when the message is", "to Service Bus. This is the default value used when", "pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence,", "a str\") pulumi.set(__self__, \"forward_dead_lettered_messages_to\", forward_dead_lettered_messages_to) if forward_to and not isinstance(forward_to,", "accessed_at and not isinstance(accessed_at, str): raise TypeError(\"Expected argument 'accessed_at' to", "enable_batched_operations) if forward_dead_lettered_messages_to and not isinstance(forward_dead_lettered_messages_to, str): raise TypeError(\"Expected argument", "support when a message expires. \"\"\" return pulumi.get(self, \"dead_lettering_on_message_expiration\") @property", "= subscription_name __args__['topicName'] = topic_name if opts is None: opts", "pulumi.runtime.invoke('azure-native:servicebus/v20210601preview:getSubscription', __args__, opts=opts, typ=GetSubscriptionResult).value return AwaitableGetSubscriptionResult( accessed_at=__ret__.accessed_at, auto_delete_on_idle=__ret__.auto_delete_on_idle, client_affine_properties=__ret__.client_affine_properties, count_details=__ret__.count_details,", "client id. \"\"\" return pulumi.get(self, \"is_client_affine\") @property @pulumi.getter(name=\"lockDuration\") def lock_duration(self)", "message was created. \"\"\" return pulumi.get(self, \"created_at\") @property @pulumi.getter(name=\"deadLetteringOnFilterEvaluationExceptions\") def", "def dead_lettering_on_filter_evaluation_exceptions(self) -> Optional[bool]: \"\"\" Value that indicates whether a", "Id \"\"\" return pulumi.get(self, \"id\") @property @pulumi.getter(name=\"isClientAffine\") def is_client_affine(self) ->", "data relating to this resource. \"\"\" return pulumi.get(self, \"system_data\") @property", "the Azure subscription. :param str subscription_name: The subscription name. 
:param", "__args__['namespaceName'] = namespace_name __args__['resourceGroupName'] = resource_group_name __args__['subscriptionName'] = subscription_name __args__['topicName']", "isinstance(count_details, dict): raise TypeError(\"Expected argument 'count_details' to be a dict\")", "pulumi.set(__self__, \"message_count\", message_count) if name and not isinstance(name, str): raise", "The default value is 10 minutes. \"\"\" return pulumi.get(self, \"duplicate_detection_history_time_window\")", "__args__['subscriptionName'] = subscription_name __args__['topicName'] = topic_name if opts is None:", "default_message_time_to_live=__ret__.default_message_time_to_live, duplicate_detection_history_time_window=__ret__.duplicate_detection_history_time_window, enable_batched_operations=__ret__.enable_batched_operations, forward_dead_lettered_messages_to=__ret__.forward_dead_lettered_messages_to, forward_to=__ret__.forward_to, id=__ret__.id, is_client_affine=__ret__.is_client_affine, lock_duration=__ret__.lock_duration, max_delivery_count=__ret__.max_delivery_count, message_count=__ret__.message_count,", "and not isinstance(created_at, str): raise TypeError(\"Expected argument 'created_at' to be", "to forward the Dead Letter message \"\"\" return pulumi.get(self, \"forward_dead_lettered_messages_to\")", "to be a int\") pulumi.set(__self__, \"max_delivery_count\", max_delivery_count) if message_count and", "for the subscription. The default value is 1 minute. \"\"\"", "-> str: \"\"\" The exact time the message was updated.", "subscription_name: Optional[str] = None, topic_name: Optional[str] = None, opts: Optional[pulumi.InvokeOptions]", "pulumi.get(self, \"max_delivery_count\") @property @pulumi.getter(name=\"messageCount\") def message_count(self) -> float: \"\"\" Number", "'created_at' to be a str\") pulumi.set(__self__, \"created_at\", created_at) if dead_lettering_on_filter_evaluation_exceptions", "\"\"\" return pulumi.get(self, \"status\") @property @pulumi.getter(name=\"systemData\") def system_data(self) -> 'outputs.SystemDataResponse':", "\"\"\" return pulumi.get(self, \"name\") @property @pulumi.getter(name=\"requiresSession\") def requires_session(self) -> Optional[bool]:", "duplicate_detection_history_time_window=None, enable_batched_operations=None, forward_dead_lettered_messages_to=None, forward_to=None, id=None, is_client_affine=None, lock_duration=None, max_delivery_count=None, message_count=None, name=None,", "@pulumi.getter(name=\"duplicateDetectionHistoryTimeWindow\") def duplicate_detection_history_time_window(self) -> Optional[str]: \"\"\" ISO 8601 timeSpan structure", "of messages. \"\"\" return pulumi.get(self, \"message_count\") @property @pulumi.getter def name(self)", "raise TypeError(\"Expected argument 'forward_to' to be a str\") pulumi.set(__self__, \"forward_to\",", "\"\"\" ISO 8061 Default message timespan to live value. This", "type=self.type, updated_at=self.updated_at) def get_subscription(namespace_name: Optional[str] = None, resource_group_name: Optional[str] =", "was updated. 
\"\"\" return pulumi.get(self, \"updated_at\") class AwaitableGetSubscriptionResult(GetSubscriptionResult): # pylint:", "be a bool\") pulumi.set(__self__, \"dead_lettering_on_message_expiration\", dead_lettering_on_message_expiration) if default_message_time_to_live and not", "expires, starting from when the message is sent to Service", "\"type\", type) if updated_at and not isinstance(updated_at, str): raise TypeError(\"Expected", "\"message_count\") @property @pulumi.getter def name(self) -> str: \"\"\" Resource name", "meta data relating to this resource. \"\"\" return pulumi.get(self, \"system_data\")", "@pulumi.getter(name=\"createdAt\") def created_at(self) -> str: \"\"\" Exact time the message", "to be a str\") pulumi.set(__self__, \"created_at\", created_at) if dead_lettering_on_filter_evaluation_exceptions and", "str): raise TypeError(\"Expected argument 'accessed_at' to be a str\") pulumi.set(__self__,", "a bool\") pulumi.set(__self__, \"dead_lettering_on_message_expiration\", dead_lettering_on_message_expiration) if default_message_time_to_live and not isinstance(default_message_time_to_live,", "str\") pulumi.set(__self__, \"id\", id) if is_client_affine and not isinstance(is_client_affine, bool):", "TypeError(\"Expected argument 'count_details' to be a dict\") pulumi.set(__self__, \"count_details\", count_details)", "'max_delivery_count' to be a int\") pulumi.set(__self__, \"max_delivery_count\", max_delivery_count) if message_count", "\"\"\" return pulumi.get(self, \"type\") @property @pulumi.getter(name=\"updatedAt\") def updated_at(self) -> str:", "whether a subscription has dead letter support on filter evaluation", "a dict\") pulumi.set(__self__, \"client_affine_properties\", client_affine_properties) if count_details and not isinstance(count_details,", "dict): raise TypeError(\"Expected argument 'client_affine_properties' to be a dict\") pulumi.set(__self__,", "forward_dead_lettered_messages_to and not isinstance(forward_dead_lettered_messages_to, str): raise TypeError(\"Expected argument 'forward_dead_lettered_messages_to' to", "str\") pulumi.set(__self__, \"name\", name) if requires_session and not isinstance(requires_session, bool):", "was a receive request to this subscription. \"\"\" return pulumi.get(self,", "Optional[bool]: \"\"\" Value that indicates whether the subscription has an", "default_message_time_to_live and not isinstance(default_message_time_to_live, str): raise TypeError(\"Expected argument 'default_message_time_to_live' to", "server-side batched operations are enabled. \"\"\" return pulumi.get(self, \"enable_batched_operations\") @property", "that defines the duration of the duplicate detection history. 
The", "return pulumi.get(self, \"updated_at\") class AwaitableGetSubscriptionResult(GetSubscriptionResult): # pylint: disable=using-constant-test def __await__(self):", "forward_to=None, id=None, is_client_affine=None, lock_duration=None, max_delivery_count=None, message_count=None, name=None, requires_session=None, status=None, system_data=None,", "\"accessed_at\") @property @pulumi.getter(name=\"autoDeleteOnIdle\") def auto_delete_on_idle(self) -> Optional[str]: \"\"\" ISO 8061", "a str\") pulumi.set(__self__, \"type\", type) if updated_at and not isinstance(updated_at,", "default value used when TimeToLive is not set on a", "TypeError(\"Expected argument 'created_at' to be a str\") pulumi.set(__self__, \"created_at\", created_at)", "\"\"\" return pulumi.get(self, \"count_details\") @property @pulumi.getter(name=\"createdAt\") def created_at(self) -> str:", "requires_session and not isinstance(requires_session, bool): raise TypeError(\"Expected argument 'requires_session' to", "pulumi.get(self, \"forward_dead_lettered_messages_to\") @property @pulumi.getter(name=\"forwardTo\") def forward_to(self) -> Optional[str]: \"\"\" Queue/Topic", "after which the topic is automatically deleted. The minimum duration", "message expires, starting from when the message is sent to", "pulumi.get(self, \"forward_to\") @property @pulumi.getter def id(self) -> str: \"\"\" Resource", "@property @pulumi.getter(name=\"isClientAffine\") def is_client_affine(self) -> Optional[bool]: \"\"\" Value that indicates", "value used when TimeToLive is not set on a message", "default_message_time_to_live=self.default_message_time_to_live, duplicate_detection_history_time_window=self.duplicate_detection_history_time_window, enable_batched_operations=self.enable_batched_operations, forward_dead_lettered_messages_to=self.forward_dead_lettered_messages_to, forward_to=self.forward_to, id=self.id, is_client_affine=self.is_client_affine, lock_duration=self.lock_duration, max_delivery_count=self.max_delivery_count, message_count=self.message_count,", "'AwaitableGetSubscriptionResult', 'get_subscription', ] @pulumi.output_type class GetSubscriptionResult: \"\"\" Description of subscription", "accessed_at=self.accessed_at, auto_delete_on_idle=self.auto_delete_on_idle, client_affine_properties=self.client_affine_properties, count_details=self.count_details, created_at=self.created_at, dead_lettering_on_filter_evaluation_exceptions=self.dead_lettering_on_filter_evaluation_exceptions, dead_lettering_on_message_expiration=self.dead_lettering_on_message_expiration, default_message_time_to_live=self.default_message_time_to_live, duplicate_detection_history_time_window=self.duplicate_detection_history_time_window, enable_batched_operations=self.enable_batched_operations,", "is the default value used when TimeToLive is not set", "'duplicate_detection_history_time_window' to be a str\") pulumi.set(__self__, \"duplicate_detection_history_time_window\", duplicate_detection_history_time_window) if enable_batched_operations", "status) if system_data and not isinstance(system_data, dict): raise TypeError(\"Expected argument", "-> str: \"\"\" Resource Id \"\"\" return pulumi.get(self, \"id\") @property", "__args__, opts=opts, typ=GetSubscriptionResult).value return AwaitableGetSubscriptionResult( accessed_at=__ret__.accessed_at, auto_delete_on_idle=__ret__.auto_delete_on_idle, client_affine_properties=__ret__.client_affine_properties, count_details=__ret__.count_details, created_at=__ret__.created_at,", "isinstance(is_client_affine, bool): raise 
TypeError(\"Expected argument 'is_client_affine' to be a bool\")", "return pulumi.get(self, \"forward_dead_lettered_messages_to\") @property @pulumi.getter(name=\"forwardTo\") def forward_to(self) -> Optional[str]: \"\"\"", "from ... import _utilities from . import outputs __all__ =", "enable_batched_operations=None, forward_dead_lettered_messages_to=None, forward_to=None, id=None, is_client_affine=None, lock_duration=None, max_delivery_count=None, message_count=None, name=None, requires_session=None,", "TypeError(\"Expected argument 'is_client_affine' to be a bool\") pulumi.set(__self__, \"is_client_affine\", is_client_affine)", "and not isinstance(status, str): raise TypeError(\"Expected argument 'status' to be", "a dict\") pulumi.set(__self__, \"system_data\", system_data) if type and not isinstance(type,", "-> Optional[str]: \"\"\" Queue/Topic name to forward the messages \"\"\"", "@pulumi.getter def name(self) -> str: \"\"\" Resource name \"\"\" return", "a float\") pulumi.set(__self__, \"message_count\", message_count) if name and not isinstance(name,", "Value that indicates whether the subscription has an affinity to", "str): raise TypeError(\"Expected argument 'default_message_time_to_live' to be a str\") pulumi.set(__self__,", "raise TypeError(\"Expected argument 'auto_delete_on_idle' to be a str\") pulumi.set(__self__, \"auto_delete_on_idle\",", "'dead_lettering_on_filter_evaluation_exceptions' to be a bool\") pulumi.set(__self__, \"dead_lettering_on_filter_evaluation_exceptions\", dead_lettering_on_filter_evaluation_exceptions) if dead_lettering_on_message_expiration", "exact time the message was updated. \"\"\" return pulumi.get(self, \"updated_at\")", "def forward_to(self) -> Optional[str]: \"\"\" Queue/Topic name to forward the", "\"\"\" return pulumi.get(self, \"enable_batched_operations\") @property @pulumi.getter(name=\"forwardDeadLetteredMessagesTo\") def forward_dead_lettered_messages_to(self) -> Optional[str]:", "\"enable_batched_operations\") @property @pulumi.getter(name=\"forwardDeadLetteredMessagesTo\") def forward_dead_lettered_messages_to(self) -> Optional[str]: \"\"\" Queue/Topic name", "pulumi.get(self, \"is_client_affine\") @property @pulumi.getter(name=\"lockDuration\") def lock_duration(self) -> Optional[str]: \"\"\" ISO", "@pulumi.getter(name=\"systemData\") def system_data(self) -> 'outputs.SystemDataResponse': \"\"\" The system meta data", "not isinstance(id, str): raise TypeError(\"Expected argument 'id' to be a", "= None, resource_group_name: Optional[str] = None, subscription_name: Optional[str] = None,", "accessed_at=None, auto_delete_on_idle=None, client_affine_properties=None, count_details=None, created_at=None, dead_lettering_on_filter_evaluation_exceptions=None, dead_lettering_on_message_expiration=None, default_message_time_to_live=None, duplicate_detection_history_time_window=None, enable_batched_operations=None,", "resource_group_name: Optional[str] = None, subscription_name: Optional[str] = None, topic_name: Optional[str]", "str topic_name: The topic name. \"\"\" __args__ = dict() __args__['namespaceName']", "value is 1 minute. \"\"\" return pulumi.get(self, \"lock_duration\") @property @pulumi.getter(name=\"maxDeliveryCount\")", "\"\"\" ISO 8061 timeSpan idle interval after which the topic", "if count_details and not isinstance(count_details, dict): raise TypeError(\"Expected argument 'count_details'", "messages. 
\"\"\" return pulumi.get(self, \"message_count\") @property @pulumi.getter def name(self) ->", "@pulumi.getter(name=\"clientAffineProperties\") def client_affine_properties(self) -> Optional['outputs.SBClientAffinePropertiesResponse']: \"\"\" Properties specific to client", "Exact time the message was created. \"\"\" return pulumi.get(self, \"created_at\")", "forward the messages \"\"\" return pulumi.get(self, \"forward_to\") @property @pulumi.getter def", "isinstance(forward_to, str): raise TypeError(\"Expected argument 'forward_to' to be a str\")", "str): raise TypeError(\"Expected argument 'name' to be a str\") pulumi.set(__self__,", "AwaitableGetSubscriptionResult( accessed_at=__ret__.accessed_at, auto_delete_on_idle=__ret__.auto_delete_on_idle, client_affine_properties=__ret__.client_affine_properties, count_details=__ret__.count_details, created_at=__ret__.created_at, dead_lettering_on_filter_evaluation_exceptions=__ret__.dead_lettering_on_filter_evaluation_exceptions, dead_lettering_on_message_expiration=__ret__.dead_lettering_on_message_expiration, default_message_time_to_live=__ret__.default_message_time_to_live, duplicate_detection_history_time_window=__ret__.duplicate_detection_history_time_window,", "status of a messaging entity. \"\"\" return pulumi.get(self, \"status\") @property", "isinstance(dead_lettering_on_filter_evaluation_exceptions, bool): raise TypeError(\"Expected argument 'dead_lettering_on_filter_evaluation_exceptions' to be a bool\")", "raise TypeError(\"Expected argument 'duplicate_detection_history_time_window' to be a str\") pulumi.set(__self__, \"duplicate_detection_history_time_window\",", "the duplicate detection history. The default value is 10 minutes.", "subscription. The default value is 1 minute. \"\"\" return pulumi.get(self,", "whether the subscription has an affinity to the client id.", "hand unless you're certain you know what you are doing!", "__await__(self): if False: yield self return GetSubscriptionResult( accessed_at=self.accessed_at, auto_delete_on_idle=self.auto_delete_on_idle, client_affine_properties=self.client_affine_properties,", "opts.version = _utilities.get_version() __ret__ = pulumi.runtime.invoke('azure-native:servicebus/v20210601preview:getSubscription', __args__, opts=opts, typ=GetSubscriptionResult).value return", "raise TypeError(\"Expected argument 'message_count' to be a float\") pulumi.set(__self__, \"message_count\",", "dict\") pulumi.set(__self__, \"count_details\", count_details) if created_at and not isinstance(created_at, str):", "subscription_name __args__['topicName'] = topic_name if opts is None: opts =", "-> Optional[bool]: \"\"\" Value that indicates whether the subscription has", "\"\"\" return pulumi.get(self, \"is_client_affine\") @property @pulumi.getter(name=\"lockDuration\") def lock_duration(self) -> Optional[str]:", "-> 'outputs.MessageCountDetailsResponse': \"\"\" Message count details \"\"\" return pulumi.get(self, \"count_details\")", "'outputs.SystemDataResponse': \"\"\" The system meta data relating to this resource.", "pulumi.get(self, \"auto_delete_on_idle\") @property @pulumi.getter(name=\"clientAffineProperties\") def client_affine_properties(self) -> Optional['outputs.SBClientAffinePropertiesResponse']: \"\"\" Properties", "on filter evaluation exceptions. 
\"\"\" return pulumi.get(self, \"dead_lettering_on_filter_evaluation_exceptions\") @property @pulumi.getter(name=\"deadLetteringOnMessageExpiration\")", "if dead_lettering_on_filter_evaluation_exceptions and not isinstance(dead_lettering_on_filter_evaluation_exceptions, bool): raise TypeError(\"Expected argument 'dead_lettering_on_filter_evaluation_exceptions'", "client_affine_properties(self) -> Optional['outputs.SBClientAffinePropertiesResponse']: \"\"\" Properties specific to client affine subscriptions.", "str): raise TypeError(\"Expected argument 'status' to be a str\") pulumi.set(__self__,", "the default value used when TimeToLive is not set on", "dict): raise TypeError(\"Expected argument 'system_data' to be a dict\") pulumi.set(__self__,", "] @pulumi.output_type class GetSubscriptionResult: \"\"\" Description of subscription resource. \"\"\"", "Optional[str]: \"\"\" ISO 8061 timeSpan idle interval after which the", "\"\"\" Value that indicates whether a subscription has dead letter", "to this resource. \"\"\" return pulumi.get(self, \"system_data\") @property @pulumi.getter def", "requires_session(self) -> Optional[bool]: \"\"\" Value indicating if a subscription supports", "live value. This is the duration after which the message", "\"\"\" return pulumi.get(self, \"dead_lettering_on_filter_evaluation_exceptions\") @property @pulumi.getter(name=\"deadLetteringOnMessageExpiration\") def dead_lettering_on_message_expiration(self) -> Optional[bool]:", "duration after which the message expires, starting from when the", "unless you're certain you know what you are doing! ***", "topic_name: Optional[str] = None, opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSubscriptionResult:", "the Resource group within the Azure subscription. :param str subscription_name:", "pulumi.get(self, \"enable_batched_operations\") @property @pulumi.getter(name=\"forwardDeadLetteredMessagesTo\") def forward_dead_lettered_messages_to(self) -> Optional[str]: \"\"\" Queue/Topic", "5 minutes. 
\"\"\" return pulumi.get(self, \"auto_delete_on_idle\") @property @pulumi.getter(name=\"clientAffineProperties\") def client_affine_properties(self)", "return pulumi.get(self, \"message_count\") @property @pulumi.getter def name(self) -> str: \"\"\"", ":param str resource_group_name: Name of the Resource group within the", "def requires_session(self) -> Optional[bool]: \"\"\" Value indicating if a subscription", "client_affine_properties and not isinstance(client_affine_properties, dict): raise TypeError(\"Expected argument 'client_affine_properties' to", "be a dict\") pulumi.set(__self__, \"system_data\", system_data) if type and not", "\"dead_lettering_on_filter_evaluation_exceptions\") @property @pulumi.getter(name=\"deadLetteringOnMessageExpiration\") def dead_lettering_on_message_expiration(self) -> Optional[bool]: \"\"\" Value that", "pulumi.get(self, \"dead_lettering_on_filter_evaluation_exceptions\") @property @pulumi.getter(name=\"deadLetteringOnMessageExpiration\") def dead_lettering_on_message_expiration(self) -> Optional[bool]: \"\"\" Value", "topic_name if opts is None: opts = pulumi.InvokeOptions() if opts.version", "bool\") pulumi.set(__self__, \"dead_lettering_on_filter_evaluation_exceptions\", dead_lettering_on_filter_evaluation_exceptions) if dead_lettering_on_message_expiration and not isinstance(dead_lettering_on_message_expiration, bool):", "isinstance(enable_batched_operations, bool): raise TypeError(\"Expected argument 'enable_batched_operations' to be a bool\")", "isinstance(default_message_time_to_live, str): raise TypeError(\"Expected argument 'default_message_time_to_live' to be a str\")", "deleted. The minimum duration is 5 minutes. \"\"\" return pulumi.get(self,", "name) if requires_session and not isinstance(requires_session, bool): raise TypeError(\"Expected argument", "not isinstance(forward_to, str): raise TypeError(\"Expected argument 'forward_to' to be a", "structure that defines the duration of the duplicate detection history.", "'updated_at' to be a str\") pulumi.set(__self__, \"updated_at\", updated_at) @property @pulumi.getter(name=\"accessedAt\")", "count_details=self.count_details, created_at=self.created_at, dead_lettering_on_filter_evaluation_exceptions=self.dead_lettering_on_filter_evaluation_exceptions, dead_lettering_on_message_expiration=self.dead_lettering_on_message_expiration, default_message_time_to_live=self.default_message_time_to_live, duplicate_detection_history_time_window=self.duplicate_detection_history_time_window, enable_batched_operations=self.enable_batched_operations, forward_dead_lettered_messages_to=self.forward_dead_lettered_messages_to, forward_to=self.forward_to, id=self.id,", "if system_data and not isinstance(system_data, dict): raise TypeError(\"Expected argument 'system_data'", "\"status\", status) if system_data and not isinstance(system_data, dict): raise TypeError(\"Expected", "be a str\") pulumi.set(__self__, \"updated_at\", updated_at) @property @pulumi.getter(name=\"accessedAt\") def accessed_at(self)", "argument 'system_data' to be a dict\") pulumi.set(__self__, \"system_data\", system_data) if", "TypeError(\"Expected argument 'lock_duration' to be a str\") pulumi.set(__self__, \"lock_duration\", lock_duration)", "\"default_message_time_to_live\", default_message_time_to_live) if duplicate_detection_history_time_window and not isinstance(duplicate_detection_history_time_window, str): raise TypeError(\"Expected", "and not isinstance(dead_lettering_on_message_expiration, bool): raise TypeError(\"Expected argument 
'dead_lettering_on_message_expiration' to be", "max_delivery_count) if message_count and not isinstance(message_count, float): raise TypeError(\"Expected argument", "\"\"\" ISO 8601 timeSpan structure that defines the duration of", "opts is None: opts = pulumi.InvokeOptions() if opts.version is None:", "return pulumi.get(self, \"id\") @property @pulumi.getter(name=\"isClientAffine\") def is_client_affine(self) -> Optional[bool]: \"\"\"", "dead_lettering_on_message_expiration and not isinstance(dead_lettering_on_message_expiration, bool): raise TypeError(\"Expected argument 'dead_lettering_on_message_expiration' to", "if forward_to and not isinstance(forward_to, str): raise TypeError(\"Expected argument 'forward_to'", "return pulumi.get(self, \"accessed_at\") @property @pulumi.getter(name=\"autoDeleteOnIdle\") def auto_delete_on_idle(self) -> Optional[str]: \"\"\"", "and not isinstance(dead_lettering_on_filter_evaluation_exceptions, bool): raise TypeError(\"Expected argument 'dead_lettering_on_filter_evaluation_exceptions' to be", "Any, Mapping, Optional, Sequence, Union, overload from ... import _utilities", "None, opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSubscriptionResult: \"\"\" Description of", "system_data and not isinstance(system_data, dict): raise TypeError(\"Expected argument 'system_data' to", "subscription has dead letter support when a message expires. \"\"\"", "'status' to be a str\") pulumi.set(__self__, \"status\", status) if system_data", "after which the message expires, starting from when the message", "Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSubscriptionResult: \"\"\" Description of subscription resource.", "from . import outputs __all__ = [ 'GetSubscriptionResult', 'AwaitableGetSubscriptionResult', 'get_subscription',", "Enumerates the possible values for the status of a messaging", "import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union,", "raise TypeError(\"Expected argument 'dead_lettering_on_message_expiration' to be a bool\") pulumi.set(__self__, \"dead_lettering_on_message_expiration\",", "\"max_delivery_count\", max_delivery_count) if message_count and not isinstance(message_count, float): raise TypeError(\"Expected", "affinity to the client id. \"\"\" return pulumi.get(self, \"is_client_affine\") @property", "what you are doing! *** import warnings import pulumi import", "if a subscription supports the concept of sessions. \"\"\" return", "@property @pulumi.getter(name=\"systemData\") def system_data(self) -> 'outputs.SystemDataResponse': \"\"\" The system meta", "topic name. \"\"\" __args__ = dict() __args__['namespaceName'] = namespace_name __args__['resourceGroupName']", "@pulumi.getter(name=\"enableBatchedOperations\") def enable_batched_operations(self) -> Optional[bool]: \"\"\" Value that indicates whether", "Pulumi SDK Generator. 
*** # *** Do not edit by", "pulumi.set(__self__, \"lock_duration\", lock_duration) if max_delivery_count and not isinstance(max_delivery_count, int): raise", "raise TypeError(\"Expected argument 'updated_at' to be a str\") pulumi.set(__self__, \"updated_at\",", "is None: opts.version = _utilities.get_version() __ret__ = pulumi.runtime.invoke('azure-native:servicebus/v20210601preview:getSubscription', __args__, opts=opts,", "\"message_count\", message_count) if name and not isinstance(name, str): raise TypeError(\"Expected", "not isinstance(name, str): raise TypeError(\"Expected argument 'name' to be a", "TypeError(\"Expected argument 'type' to be a str\") pulumi.set(__self__, \"type\", type)", "def __init__(__self__, accessed_at=None, auto_delete_on_idle=None, client_affine_properties=None, count_details=None, created_at=None, dead_lettering_on_filter_evaluation_exceptions=None, dead_lettering_on_message_expiration=None, default_message_time_to_live=None,", "raise TypeError(\"Expected argument 'name' to be a str\") pulumi.set(__self__, \"name\",", "and not isinstance(message_count, float): raise TypeError(\"Expected argument 'message_count' to be", "starting from when the message is sent to Service Bus.", "a subscription supports the concept of sessions. \"\"\" return pulumi.get(self,", "\"\"\" return pulumi.get(self, \"system_data\") @property @pulumi.getter def type(self) -> str:", "\"client_affine_properties\", client_affine_properties) if count_details and not isinstance(count_details, dict): raise TypeError(\"Expected", "\"\"\" return pulumi.get(self, \"forward_dead_lettered_messages_to\") @property @pulumi.getter(name=\"forwardTo\") def forward_to(self) -> Optional[str]:", "return pulumi.get(self, \"is_client_affine\") @property @pulumi.getter(name=\"lockDuration\") def lock_duration(self) -> Optional[str]: \"\"\"", "opts = pulumi.InvokeOptions() if opts.version is None: opts.version = _utilities.get_version()", "updated_at=self.updated_at) def get_subscription(namespace_name: Optional[str] = None, resource_group_name: Optional[str] = None,", "be a bool\") pulumi.set(__self__, \"is_client_affine\", is_client_affine) if lock_duration and not", "'default_message_time_to_live' to be a str\") pulumi.set(__self__, \"default_message_time_to_live\", default_message_time_to_live) if duplicate_detection_history_time_window", "the possible values for the status of a messaging entity.", "name :param str resource_group_name: Name of the Resource group within", "pulumi.set(__self__, \"name\", name) if requires_session and not isinstance(requires_session, bool): raise", "timeSpan structure that defines the duration of the duplicate detection", "Optional[str]: \"\"\" ISO 8061 Default message timespan to live value.", "updated_at(self) -> str: \"\"\" The exact time the message was", "yield self return GetSubscriptionResult( accessed_at=self.accessed_at, auto_delete_on_idle=self.auto_delete_on_idle, client_affine_properties=self.client_affine_properties, count_details=self.count_details, created_at=self.created_at, dead_lettering_on_filter_evaluation_exceptions=self.dead_lettering_on_filter_evaluation_exceptions,", "str resource_group_name: Name of the Resource group within the Azure", "be a dict\") pulumi.set(__self__, \"count_details\", count_details) if created_at and not", "return pulumi.get(self, \"count_details\") @property @pulumi.getter(name=\"createdAt\") def created_at(self) -> str: \"\"\"", "topic_name: The topic name. 
\"\"\" __args__ = dict() __args__['namespaceName'] =", "self return GetSubscriptionResult( accessed_at=self.accessed_at, auto_delete_on_idle=self.auto_delete_on_idle, client_affine_properties=self.client_affine_properties, count_details=self.count_details, created_at=self.created_at, dead_lettering_on_filter_evaluation_exceptions=self.dead_lettering_on_filter_evaluation_exceptions, dead_lettering_on_message_expiration=self.dead_lettering_on_message_expiration,", "auto_delete_on_idle=__ret__.auto_delete_on_idle, client_affine_properties=__ret__.client_affine_properties, count_details=__ret__.count_details, created_at=__ret__.created_at, dead_lettering_on_filter_evaluation_exceptions=__ret__.dead_lettering_on_filter_evaluation_exceptions, dead_lettering_on_message_expiration=__ret__.dead_lettering_on_message_expiration, default_message_time_to_live=__ret__.default_message_time_to_live, duplicate_detection_history_time_window=__ret__.duplicate_detection_history_time_window, enable_batched_operations=__ret__.enable_batched_operations, forward_dead_lettered_messages_to=__ret__.forward_dead_lettered_messages_to,", "\"default_message_time_to_live\") @property @pulumi.getter(name=\"duplicateDetectionHistoryTimeWindow\") def duplicate_detection_history_time_window(self) -> Optional[str]: \"\"\" ISO 8601", "def client_affine_properties(self) -> Optional['outputs.SBClientAffinePropertiesResponse']: \"\"\" Properties specific to client affine", "-> str: \"\"\" Last time there was a receive request", "be a str\") pulumi.set(__self__, \"type\", type) if updated_at and not", "and not isinstance(name, str): raise TypeError(\"Expected argument 'name' to be", "return pulumi.get(self, \"type\") @property @pulumi.getter(name=\"updatedAt\") def updated_at(self) -> str: \"\"\"", "# pylint: disable=using-constant-test def __await__(self): if False: yield self return", "Letter message \"\"\" return pulumi.get(self, \"forward_dead_lettered_messages_to\") @property @pulumi.getter(name=\"forwardTo\") def forward_to(self)", "bool\") pulumi.set(__self__, \"dead_lettering_on_message_expiration\", dead_lettering_on_message_expiration) if default_message_time_to_live and not isinstance(default_message_time_to_live, str):", "there was a receive request to this subscription. \"\"\" return", "\"\"\" ISO 8061 lock duration timespan for the subscription. The", "-> str: \"\"\" Resource name \"\"\" return pulumi.get(self, \"name\") @property", "warnings import pulumi import pulumi.runtime from typing import Any, Mapping,", "pulumi.set(__self__, \"accessed_at\", accessed_at) if auto_delete_on_idle and not isinstance(auto_delete_on_idle, str): raise", "max_delivery_count and not isinstance(max_delivery_count, int): raise TypeError(\"Expected argument 'max_delivery_count' to", "Optional[bool]: \"\"\" Value that indicates whether a subscription has dead", "resource. 
:param str namespace_name: The namespace name :param str resource_group_name:", "if default_message_time_to_live and not isinstance(default_message_time_to_live, str): raise TypeError(\"Expected argument 'default_message_time_to_live'", "'get_subscription', ] @pulumi.output_type class GetSubscriptionResult: \"\"\" Description of subscription resource.", "duplicate_detection_history_time_window=self.duplicate_detection_history_time_window, enable_batched_operations=self.enable_batched_operations, forward_dead_lettered_messages_to=self.forward_dead_lettered_messages_to, forward_to=self.forward_to, id=self.id, is_client_affine=self.is_client_affine, lock_duration=self.lock_duration, max_delivery_count=self.max_delivery_count, message_count=self.message_count, name=self.name,", "Optional[str] = None, topic_name: Optional[str] = None, opts: Optional[pulumi.InvokeOptions] =", "argument 'created_at' to be a str\") pulumi.set(__self__, \"created_at\", created_at) if", "str: \"\"\" Resource name \"\"\" return pulumi.get(self, \"name\") @property @pulumi.getter(name=\"requiresSession\")", "be a str\") pulumi.set(__self__, \"id\", id) if is_client_affine and not", "argument 'enable_batched_operations' to be a bool\") pulumi.set(__self__, \"enable_batched_operations\", enable_batched_operations) if", "dict): raise TypeError(\"Expected argument 'count_details' to be a dict\") pulumi.set(__self__,", "Resource group within the Azure subscription. :param str subscription_name: The", "str\") pulumi.set(__self__, \"type\", type) if updated_at and not isinstance(updated_at, str):", "# coding=utf-8 # *** WARNING: this file was generated by", "import warnings import pulumi import pulumi.runtime from typing import Any,", "The system meta data relating to this resource. \"\"\" return", "to be a str\") pulumi.set(__self__, \"default_message_time_to_live\", default_message_time_to_live) if duplicate_detection_history_time_window and", "and not isinstance(default_message_time_to_live, str): raise TypeError(\"Expected argument 'default_message_time_to_live' to be", "dead_lettering_on_message_expiration(self) -> Optional[bool]: \"\"\" Value that indicates whether a subscription", "'auto_delete_on_idle' to be a str\") pulumi.set(__self__, \"auto_delete_on_idle\", auto_delete_on_idle) if client_affine_properties", "\"\"\" return pulumi.get(self, \"id\") @property @pulumi.getter(name=\"isClientAffine\") def is_client_affine(self) -> Optional[bool]:", "AwaitableGetSubscriptionResult(GetSubscriptionResult): # pylint: disable=using-constant-test def __await__(self): if False: yield self", "-> Optional[bool]: \"\"\" Value that indicates whether a subscription has", "*** import warnings import pulumi import pulumi.runtime from typing import", "to be a bool\") pulumi.set(__self__, \"dead_lettering_on_message_expiration\", dead_lettering_on_message_expiration) if default_message_time_to_live and", "a str\") pulumi.set(__self__, \"status\", status) if system_data and not isinstance(system_data,", "\"\"\" Resource Id \"\"\" return pulumi.get(self, \"id\") @property @pulumi.getter(name=\"isClientAffine\") def", "name=self.name, requires_session=self.requires_session, status=self.status, system_data=self.system_data, type=self.type, updated_at=self.updated_at) def get_subscription(namespace_name: Optional[str] =", "Queue/Topic name to forward the Dead Letter message \"\"\" return", "= None, subscription_name: Optional[str] = None, topic_name: Optional[str] = None,", "-> str: \"\"\" Exact time the message was created. 
\"\"\"", "__init__(__self__, accessed_at=None, auto_delete_on_idle=None, client_affine_properties=None, count_details=None, created_at=None, dead_lettering_on_filter_evaluation_exceptions=None, dead_lettering_on_message_expiration=None, default_message_time_to_live=None, duplicate_detection_history_time_window=None,", "\"system_data\", system_data) if type and not isinstance(type, str): raise TypeError(\"Expected", "bool): raise TypeError(\"Expected argument 'dead_lettering_on_filter_evaluation_exceptions' to be a bool\") pulumi.set(__self__,", "'accessed_at' to be a str\") pulumi.set(__self__, \"accessed_at\", accessed_at) if auto_delete_on_idle", "you know what you are doing! *** import warnings import", "is_client_affine=None, lock_duration=None, max_delivery_count=None, message_count=None, name=None, requires_session=None, status=None, system_data=None, type=None, updated_at=None):", "TypeError(\"Expected argument 'name' to be a str\") pulumi.set(__self__, \"name\", name)", "specific to client affine subscriptions. \"\"\" return pulumi.get(self, \"client_affine_properties\") @property", "@pulumi.getter(name=\"forwardDeadLetteredMessagesTo\") def forward_dead_lettered_messages_to(self) -> Optional[str]: \"\"\" Queue/Topic name to forward", "@pulumi.getter def status(self) -> Optional[str]: \"\"\" Enumerates the possible values", "enable_batched_operations=self.enable_batched_operations, forward_dead_lettered_messages_to=self.forward_dead_lettered_messages_to, forward_to=self.forward_to, id=self.id, is_client_affine=self.is_client_affine, lock_duration=self.lock_duration, max_delivery_count=self.max_delivery_count, message_count=self.message_count, name=self.name, requires_session=self.requires_session,", "isinstance(message_count, float): raise TypeError(\"Expected argument 'message_count' to be a float\")", "message was updated. \"\"\" return pulumi.get(self, \"updated_at\") class AwaitableGetSubscriptionResult(GetSubscriptionResult): #", "isinstance(status, str): raise TypeError(\"Expected argument 'status' to be a str\")", "be a str\") pulumi.set(__self__, \"lock_duration\", lock_duration) if max_delivery_count and not", "-> float: \"\"\" Number of messages. \"\"\" return pulumi.get(self, \"message_count\")", "system_data(self) -> 'outputs.SystemDataResponse': \"\"\" The system meta data relating to", "argument 'id' to be a str\") pulumi.set(__self__, \"id\", id) if", "pulumi.set(__self__, \"enable_batched_operations\", enable_batched_operations) if forward_dead_lettered_messages_to and not isinstance(forward_dead_lettered_messages_to, str): raise", "\"\"\" Resource type \"\"\" return pulumi.get(self, \"type\") @property @pulumi.getter(name=\"updatedAt\") def", "a message expires. \"\"\" return pulumi.get(self, \"dead_lettering_on_message_expiration\") @property @pulumi.getter(name=\"defaultMessageTimeToLive\") def", "@property @pulumi.getter(name=\"enableBatchedOperations\") def enable_batched_operations(self) -> Optional[bool]: \"\"\" Value that indicates", "raise TypeError(\"Expected argument 'type' to be a str\") pulumi.set(__self__, \"type\",", "Number of maximum deliveries. 
\"\"\" return pulumi.get(self, \"max_delivery_count\") @property @pulumi.getter(name=\"messageCount\")", "@property @pulumi.getter(name=\"maxDeliveryCount\") def max_delivery_count(self) -> Optional[int]: \"\"\" Number of maximum", "isinstance(duplicate_detection_history_time_window, str): raise TypeError(\"Expected argument 'duplicate_detection_history_time_window' to be a str\")", "be a str\") pulumi.set(__self__, \"name\", name) if requires_session and not", "the message is sent to Service Bus. This is the", "argument 'dead_lettering_on_filter_evaluation_exceptions' to be a bool\") pulumi.set(__self__, \"dead_lettering_on_filter_evaluation_exceptions\", dead_lettering_on_filter_evaluation_exceptions) if", "bool\") pulumi.set(__self__, \"requires_session\", requires_session) if status and not isinstance(status, str):", "@pulumi.getter def id(self) -> str: \"\"\" Resource Id \"\"\" return", "that indicates whether the subscription has an affinity to the", "not isinstance(status, str): raise TypeError(\"Expected argument 'status' to be a", "\"client_affine_properties\") @property @pulumi.getter(name=\"countDetails\") def count_details(self) -> 'outputs.MessageCountDetailsResponse': \"\"\" Message count", "\"count_details\") @property @pulumi.getter(name=\"createdAt\") def created_at(self) -> str: \"\"\" Exact time", "int): raise TypeError(\"Expected argument 'max_delivery_count' to be a int\") pulumi.set(__self__,", "and not isinstance(count_details, dict): raise TypeError(\"Expected argument 'count_details' to be", "@property @pulumi.getter(name=\"defaultMessageTimeToLive\") def default_message_time_to_live(self) -> Optional[str]: \"\"\" ISO 8061 Default", "sent to Service Bus. This is the default value used", "letter support on filter evaluation exceptions. \"\"\" return pulumi.get(self, \"dead_lettering_on_filter_evaluation_exceptions\")", "TypeError(\"Expected argument 'max_delivery_count' to be a int\") pulumi.set(__self__, \"max_delivery_count\", max_delivery_count)", "minute. \"\"\" return pulumi.get(self, \"lock_duration\") @property @pulumi.getter(name=\"maxDeliveryCount\") def max_delivery_count(self) ->", "requires_session=self.requires_session, status=self.status, system_data=self.system_data, type=self.type, updated_at=self.updated_at) def get_subscription(namespace_name: Optional[str] = None,", "= None, topic_name: Optional[str] = None, opts: Optional[pulumi.InvokeOptions] = None)", "a str\") pulumi.set(__self__, \"created_at\", created_at) if dead_lettering_on_filter_evaluation_exceptions and not isinstance(dead_lettering_on_filter_evaluation_exceptions,", "= None) -> AwaitableGetSubscriptionResult: \"\"\" Description of subscription resource. :param", "id=None, is_client_affine=None, lock_duration=None, max_delivery_count=None, message_count=None, name=None, requires_session=None, status=None, system_data=None, type=None,", "name. :param str topic_name: The topic name. \"\"\" __args__ =", "relating to this resource. \"\"\" return pulumi.get(self, \"system_data\") @property @pulumi.getter", "@property @pulumi.getter(name=\"requiresSession\") def requires_session(self) -> Optional[bool]: \"\"\" Value indicating if", "name. \"\"\" __args__ = dict() __args__['namespaceName'] = namespace_name __args__['resourceGroupName'] =", "argument 'is_client_affine' to be a bool\") pulumi.set(__self__, \"is_client_affine\", is_client_affine) if", "client affine subscriptions. 
\"\"\" return pulumi.get(self, \"client_affine_properties\") @property @pulumi.getter(name=\"countDetails\") def", "str): raise TypeError(\"Expected argument 'type' to be a str\") pulumi.set(__self__,", "-> Optional[str]: \"\"\" ISO 8061 Default message timespan to live", "pulumi.get(self, \"lock_duration\") @property @pulumi.getter(name=\"maxDeliveryCount\") def max_delivery_count(self) -> Optional[int]: \"\"\" Number", "None) -> AwaitableGetSubscriptionResult: \"\"\" Description of subscription resource. :param str", "time the message was updated. \"\"\" return pulumi.get(self, \"updated_at\") class", "bool): raise TypeError(\"Expected argument 'enable_batched_operations' to be a bool\") pulumi.set(__self__,", "lock_duration and not isinstance(lock_duration, str): raise TypeError(\"Expected argument 'lock_duration' to", "get_subscription(namespace_name: Optional[str] = None, resource_group_name: Optional[str] = None, subscription_name: Optional[str]", "_utilities from . import outputs __all__ = [ 'GetSubscriptionResult', 'AwaitableGetSubscriptionResult',", "isinstance(lock_duration, str): raise TypeError(\"Expected argument 'lock_duration' to be a str\")", "\"\"\" return pulumi.get(self, \"duplicate_detection_history_time_window\") @property @pulumi.getter(name=\"enableBatchedOperations\") def enable_batched_operations(self) -> Optional[bool]:", "pulumi.set(__self__, \"client_affine_properties\", client_affine_properties) if count_details and not isinstance(count_details, dict): raise", "= _utilities.get_version() __ret__ = pulumi.runtime.invoke('azure-native:servicebus/v20210601preview:getSubscription', __args__, opts=opts, typ=GetSubscriptionResult).value return AwaitableGetSubscriptionResult(", "from when the message is sent to Service Bus. This", "ISO 8061 Default message timespan to live value. This is", "def accessed_at(self) -> str: \"\"\" Last time there was a", "-> Optional[str]: \"\"\" Enumerates the possible values for the status", "\"\"\" return pulumi.get(self, \"auto_delete_on_idle\") @property @pulumi.getter(name=\"clientAffineProperties\") def client_affine_properties(self) -> Optional['outputs.SBClientAffinePropertiesResponse']:", "import outputs __all__ = [ 'GetSubscriptionResult', 'AwaitableGetSubscriptionResult', 'get_subscription', ] @pulumi.output_type", "\"\"\" Number of messages. \"\"\" return pulumi.get(self, \"message_count\") @property @pulumi.getter", "return pulumi.get(self, \"auto_delete_on_idle\") @property @pulumi.getter(name=\"clientAffineProperties\") def client_affine_properties(self) -> Optional['outputs.SBClientAffinePropertiesResponse']: \"\"\"", "*** # *** Do not edit by hand unless you're", "'enable_batched_operations' to be a bool\") pulumi.set(__self__, \"enable_batched_operations\", enable_batched_operations) if forward_dead_lettered_messages_to", "Properties specific to client affine subscriptions. 
\"\"\" return pulumi.get(self, \"client_affine_properties\")", "*** Do not edit by hand unless you're certain you", "be a bool\") pulumi.set(__self__, \"enable_batched_operations\", enable_batched_operations) if forward_dead_lettered_messages_to and not", "lock_duration=None, max_delivery_count=None, message_count=None, name=None, requires_session=None, status=None, system_data=None, type=None, updated_at=None): if", "raise TypeError(\"Expected argument 'max_delivery_count' to be a int\") pulumi.set(__self__, \"max_delivery_count\",", "@pulumi.getter(name=\"maxDeliveryCount\") def max_delivery_count(self) -> Optional[int]: \"\"\" Number of maximum deliveries.", "is 1 minute. \"\"\" return pulumi.get(self, \"lock_duration\") @property @pulumi.getter(name=\"maxDeliveryCount\") def", "forward_dead_lettered_messages_to=__ret__.forward_dead_lettered_messages_to, forward_to=__ret__.forward_to, id=__ret__.id, is_client_affine=__ret__.is_client_affine, lock_duration=__ret__.lock_duration, max_delivery_count=__ret__.max_delivery_count, message_count=__ret__.message_count, name=__ret__.name, requires_session=__ret__.requires_session, status=__ret__.status,", "are enabled. \"\"\" return pulumi.get(self, \"enable_batched_operations\") @property @pulumi.getter(name=\"forwardDeadLetteredMessagesTo\") def forward_dead_lettered_messages_to(self)", "dead letter support on filter evaluation exceptions. \"\"\" return pulumi.get(self,", "and not isinstance(enable_batched_operations, bool): raise TypeError(\"Expected argument 'enable_batched_operations' to be", "be a float\") pulumi.set(__self__, \"message_count\", message_count) if name and not", "return pulumi.get(self, \"created_at\") @property @pulumi.getter(name=\"deadLetteringOnFilterEvaluationExceptions\") def dead_lettering_on_filter_evaluation_exceptions(self) -> Optional[bool]: \"\"\"", "raise TypeError(\"Expected argument 'system_data' to be a dict\") pulumi.set(__self__, \"system_data\",", "False: yield self return GetSubscriptionResult( accessed_at=self.accessed_at, auto_delete_on_idle=self.auto_delete_on_idle, client_affine_properties=self.client_affine_properties, count_details=self.count_details, created_at=self.created_at,", "Description of subscription resource. :param str namespace_name: The namespace name", "argument 'dead_lettering_on_message_expiration' to be a bool\") pulumi.set(__self__, \"dead_lettering_on_message_expiration\", dead_lettering_on_message_expiration) if", "-> Optional[int]: \"\"\" Number of maximum deliveries. \"\"\" return pulumi.get(self,", "str\") pulumi.set(__self__, \"default_message_time_to_live\", default_message_time_to_live) if duplicate_detection_history_time_window and not isinstance(duplicate_detection_history_time_window, str):", "None: opts.version = _utilities.get_version() __ret__ = pulumi.runtime.invoke('azure-native:servicebus/v20210601preview:getSubscription', __args__, opts=opts, typ=GetSubscriptionResult).value", "dead_lettering_on_message_expiration=__ret__.dead_lettering_on_message_expiration, default_message_time_to_live=__ret__.default_message_time_to_live, duplicate_detection_history_time_window=__ret__.duplicate_detection_history_time_window, enable_batched_operations=__ret__.enable_batched_operations, forward_dead_lettered_messages_to=__ret__.forward_dead_lettered_messages_to, forward_to=__ret__.forward_to, id=__ret__.id, is_client_affine=__ret__.is_client_affine, lock_duration=__ret__.lock_duration, max_delivery_count=__ret__.max_delivery_count,", "detection history. 
The default value is 10 minutes. \"\"\" return", "if lock_duration and not isinstance(lock_duration, str): raise TypeError(\"Expected argument 'lock_duration'", "float): raise TypeError(\"Expected argument 'message_count' to be a float\") pulumi.set(__self__,", "= pulumi.InvokeOptions() if opts.version is None: opts.version = _utilities.get_version() __ret__", "str): raise TypeError(\"Expected argument 'created_at' to be a str\") pulumi.set(__self__,", "not isinstance(max_delivery_count, int): raise TypeError(\"Expected argument 'max_delivery_count' to be a", "indicating if a subscription supports the concept of sessions. \"\"\"", "dead_lettering_on_filter_evaluation_exceptions=__ret__.dead_lettering_on_filter_evaluation_exceptions, dead_lettering_on_message_expiration=__ret__.dead_lettering_on_message_expiration, default_message_time_to_live=__ret__.default_message_time_to_live, duplicate_detection_history_time_window=__ret__.duplicate_detection_history_time_window, enable_batched_operations=__ret__.enable_batched_operations, forward_dead_lettered_messages_to=__ret__.forward_dead_lettered_messages_to, forward_to=__ret__.forward_to, id=__ret__.id, is_client_affine=__ret__.is_client_affine, lock_duration=__ret__.lock_duration,", "has an affinity to the client id. \"\"\" return pulumi.get(self,", "@pulumi.getter(name=\"deadLetteringOnFilterEvaluationExceptions\") def dead_lettering_on_filter_evaluation_exceptions(self) -> Optional[bool]: \"\"\" Value that indicates whether", "the subscription has an affinity to the client id. \"\"\"", "def is_client_affine(self) -> Optional[bool]: \"\"\" Value that indicates whether the", "opts.version is None: opts.version = _utilities.get_version() __ret__ = pulumi.runtime.invoke('azure-native:servicebus/v20210601preview:getSubscription', __args__,", "\"\"\" return pulumi.get(self, \"updated_at\") class AwaitableGetSubscriptionResult(GetSubscriptionResult): # pylint: disable=using-constant-test def", "Optional[bool]: \"\"\" Value indicating if a subscription supports the concept", "raise TypeError(\"Expected argument 'lock_duration' to be a str\") pulumi.set(__self__, \"lock_duration\",", "raise TypeError(\"Expected argument 'status' to be a str\") pulumi.set(__self__, \"status\",", "*** WARNING: this file was generated by the Pulumi SDK", "status=None, system_data=None, type=None, updated_at=None): if accessed_at and not isinstance(accessed_at, str):", "is not set on a message itself. \"\"\" return pulumi.get(self,", "\"dead_lettering_on_filter_evaluation_exceptions\", dead_lettering_on_filter_evaluation_exceptions) if dead_lettering_on_message_expiration and not isinstance(dead_lettering_on_message_expiration, bool): raise TypeError(\"Expected", "\"\"\" Queue/Topic name to forward the Dead Letter message \"\"\"", "message_count=None, name=None, requires_session=None, status=None, system_data=None, type=None, updated_at=None): if accessed_at and", "@property @pulumi.getter def id(self) -> str: \"\"\" Resource Id \"\"\"", "that indicates whether server-side batched operations are enabled. 
\"\"\" return", "pulumi.get(self, \"updated_at\") class AwaitableGetSubscriptionResult(GetSubscriptionResult): # pylint: disable=using-constant-test def __await__(self): if", "to be a str\") pulumi.set(__self__, \"accessed_at\", accessed_at) if auto_delete_on_idle and", "forward_dead_lettered_messages_to) if forward_to and not isinstance(forward_to, str): raise TypeError(\"Expected argument", "str\") pulumi.set(__self__, \"forward_to\", forward_to) if id and not isinstance(id, str):", "Last time there was a receive request to this subscription.", "dead_lettering_on_filter_evaluation_exceptions=None, dead_lettering_on_message_expiration=None, default_message_time_to_live=None, duplicate_detection_history_time_window=None, enable_batched_operations=None, forward_dead_lettered_messages_to=None, forward_to=None, id=None, is_client_affine=None, lock_duration=None,", "pulumi.set(__self__, \"forward_dead_lettered_messages_to\", forward_dead_lettered_messages_to) if forward_to and not isinstance(forward_to, str): raise", "argument 'updated_at' to be a str\") pulumi.set(__self__, \"updated_at\", updated_at) @property", "def enable_batched_operations(self) -> Optional[bool]: \"\"\" Value that indicates whether server-side", "a receive request to this subscription. \"\"\" return pulumi.get(self, \"accessed_at\")", "@property @pulumi.getter(name=\"forwardTo\") def forward_to(self) -> Optional[str]: \"\"\" Queue/Topic name to", "auto_delete_on_idle=None, client_affine_properties=None, count_details=None, created_at=None, dead_lettering_on_filter_evaluation_exceptions=None, dead_lettering_on_message_expiration=None, default_message_time_to_live=None, duplicate_detection_history_time_window=None, enable_batched_operations=None, forward_dead_lettered_messages_to=None,", "doing! *** import warnings import pulumi import pulumi.runtime from typing", "not isinstance(client_affine_properties, dict): raise TypeError(\"Expected argument 'client_affine_properties' to be a", "\"\"\" Last time there was a receive request to this", "not isinstance(is_client_affine, bool): raise TypeError(\"Expected argument 'is_client_affine' to be a", "request to this subscription. \"\"\" return pulumi.get(self, \"accessed_at\") @property @pulumi.getter(name=\"autoDeleteOnIdle\")", "a str\") pulumi.set(__self__, \"default_message_time_to_live\", default_message_time_to_live) if duplicate_detection_history_time_window and not isinstance(duplicate_detection_history_time_window,", "\"\"\" def __init__(__self__, accessed_at=None, auto_delete_on_idle=None, client_affine_properties=None, count_details=None, created_at=None, dead_lettering_on_filter_evaluation_exceptions=None, dead_lettering_on_message_expiration=None,", "TypeError(\"Expected argument 'updated_at' to be a str\") pulumi.set(__self__, \"updated_at\", updated_at)", "pulumi.get(self, \"status\") @property @pulumi.getter(name=\"systemData\") def system_data(self) -> 'outputs.SystemDataResponse': \"\"\" The", "TypeError(\"Expected argument 'system_data' to be a dict\") pulumi.set(__self__, \"system_data\", system_data)", "\"max_delivery_count\") @property @pulumi.getter(name=\"messageCount\") def message_count(self) -> float: \"\"\" Number of", "automatically deleted. The minimum duration is 5 minutes. \"\"\" return", "@property @pulumi.getter def type(self) -> str: \"\"\" Resource type \"\"\"", "minimum duration is 5 minutes. 
\"\"\" return pulumi.get(self, \"auto_delete_on_idle\") @property", "str): raise TypeError(\"Expected argument 'forward_dead_lettered_messages_to' to be a str\") pulumi.set(__self__,", "str): raise TypeError(\"Expected argument 'forward_to' to be a str\") pulumi.set(__self__,", "_utilities.get_version() __ret__ = pulumi.runtime.invoke('azure-native:servicebus/v20210601preview:getSubscription', __args__, opts=opts, typ=GetSubscriptionResult).value return AwaitableGetSubscriptionResult( accessed_at=__ret__.accessed_at,", "id=__ret__.id, is_client_affine=__ret__.is_client_affine, lock_duration=__ret__.lock_duration, max_delivery_count=__ret__.max_delivery_count, message_count=__ret__.message_count, name=__ret__.name, requires_session=__ret__.requires_session, status=__ret__.status, system_data=__ret__.system_data, type=__ret__.type,", "if id and not isinstance(id, str): raise TypeError(\"Expected argument 'id'", "forward_to) if id and not isinstance(id, str): raise TypeError(\"Expected argument", "requires_session=None, status=None, system_data=None, type=None, updated_at=None): if accessed_at and not isinstance(accessed_at,", "isinstance(accessed_at, str): raise TypeError(\"Expected argument 'accessed_at' to be a str\")", "isinstance(updated_at, str): raise TypeError(\"Expected argument 'updated_at' to be a str\")", "defines the duration of the duplicate detection history. The default", "pulumi.get(self, \"created_at\") @property @pulumi.getter(name=\"deadLetteringOnFilterEvaluationExceptions\") def dead_lettering_on_filter_evaluation_exceptions(self) -> Optional[bool]: \"\"\" Value", "the Pulumi SDK Generator. *** # *** Do not edit", "isinstance(auto_delete_on_idle, str): raise TypeError(\"Expected argument 'auto_delete_on_idle' to be a str\")", "a messaging entity. \"\"\" return pulumi.get(self, \"status\") @property @pulumi.getter(name=\"systemData\") def", "updated. \"\"\" return pulumi.get(self, \"updated_at\") class AwaitableGetSubscriptionResult(GetSubscriptionResult): # pylint: disable=using-constant-test", "Optional[str] = None, opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSubscriptionResult: \"\"\"", "... import _utilities from . import outputs __all__ = [", "def system_data(self) -> 'outputs.SystemDataResponse': \"\"\" The system meta data relating", "Optional, Sequence, Union, overload from ... import _utilities from .", "indicates whether a subscription has dead letter support on filter", "concept of sessions. \"\"\" return pulumi.get(self, \"requires_session\") @property @pulumi.getter def", "interval after which the topic is automatically deleted. The minimum", "message_count(self) -> float: \"\"\" Number of messages. 
\"\"\" return pulumi.get(self,", "str\") pulumi.set(__self__, \"accessed_at\", accessed_at) if auto_delete_on_idle and not isinstance(auto_delete_on_idle, str):", "forward_to=self.forward_to, id=self.id, is_client_affine=self.is_client_affine, lock_duration=self.lock_duration, max_delivery_count=self.max_delivery_count, message_count=self.message_count, name=self.name, requires_session=self.requires_session, status=self.status, system_data=self.system_data,", "message \"\"\" return pulumi.get(self, \"forward_dead_lettered_messages_to\") @property @pulumi.getter(name=\"forwardTo\") def forward_to(self) ->", "@pulumi.getter(name=\"autoDeleteOnIdle\") def auto_delete_on_idle(self) -> Optional[str]: \"\"\" ISO 8061 timeSpan idle", "if False: yield self return GetSubscriptionResult( accessed_at=self.accessed_at, auto_delete_on_idle=self.auto_delete_on_idle, client_affine_properties=self.client_affine_properties, count_details=self.count_details,", "name to forward the Dead Letter message \"\"\" return pulumi.get(self,", "[ 'GetSubscriptionResult', 'AwaitableGetSubscriptionResult', 'get_subscription', ] @pulumi.output_type class GetSubscriptionResult: \"\"\" Description", "'count_details' to be a dict\") pulumi.set(__self__, \"count_details\", count_details) if created_at", "resource_group_name __args__['subscriptionName'] = subscription_name __args__['topicName'] = topic_name if opts is", "argument 'lock_duration' to be a str\") pulumi.set(__self__, \"lock_duration\", lock_duration) if", "None, resource_group_name: Optional[str] = None, subscription_name: Optional[str] = None, topic_name:", "return pulumi.get(self, \"duplicate_detection_history_time_window\") @property @pulumi.getter(name=\"enableBatchedOperations\") def enable_batched_operations(self) -> Optional[bool]: \"\"\"", "not isinstance(enable_batched_operations, bool): raise TypeError(\"Expected argument 'enable_batched_operations' to be a", "default_message_time_to_live) if duplicate_detection_history_time_window and not isinstance(duplicate_detection_history_time_window, str): raise TypeError(\"Expected argument", "\"name\", name) if requires_session and not isinstance(requires_session, bool): raise TypeError(\"Expected", "isinstance(max_delivery_count, int): raise TypeError(\"Expected argument 'max_delivery_count' to be a int\")", "count_details) if created_at and not isinstance(created_at, str): raise TypeError(\"Expected argument", "timespan for the subscription. The default value is 1 minute.", "type) if updated_at and not isinstance(updated_at, str): raise TypeError(\"Expected argument", "def dead_lettering_on_message_expiration(self) -> Optional[bool]: \"\"\" Value that indicates whether a", "and not isinstance(requires_session, bool): raise TypeError(\"Expected argument 'requires_session' to be", "not isinstance(message_count, float): raise TypeError(\"Expected argument 'message_count' to be a", "which the topic is automatically deleted. 
The minimum duration is", "str: \"\"\" Last time there was a receive request to", "dead_lettering_on_filter_evaluation_exceptions=self.dead_lettering_on_filter_evaluation_exceptions, dead_lettering_on_message_expiration=self.dead_lettering_on_message_expiration, default_message_time_to_live=self.default_message_time_to_live, duplicate_detection_history_time_window=self.duplicate_detection_history_time_window, enable_batched_operations=self.enable_batched_operations, forward_dead_lettered_messages_to=self.forward_dead_lettered_messages_to, forward_to=self.forward_to, id=self.id, is_client_affine=self.is_client_affine, lock_duration=self.lock_duration,", "message is sent to Service Bus. This is the default", "pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload", "namespace_name: The namespace name :param str resource_group_name: Name of the", "not isinstance(dead_lettering_on_message_expiration, bool): raise TypeError(\"Expected argument 'dead_lettering_on_message_expiration' to be a", "@pulumi.output_type class GetSubscriptionResult: \"\"\" Description of subscription resource. \"\"\" def", "argument 'accessed_at' to be a str\") pulumi.set(__self__, \"accessed_at\", accessed_at) if", "batched operations are enabled. \"\"\" return pulumi.get(self, \"enable_batched_operations\") @property @pulumi.getter(name=\"forwardDeadLetteredMessagesTo\")", "a str\") pulumi.set(__self__, \"name\", name) if requires_session and not isinstance(requires_session,", "topic is automatically deleted. The minimum duration is 5 minutes.", "duration timespan for the subscription. The default value is 1", "argument 'auto_delete_on_idle' to be a str\") pulumi.set(__self__, \"auto_delete_on_idle\", auto_delete_on_idle) if", "to be a dict\") pulumi.set(__self__, \"system_data\", system_data) if type and", "to the client id. \"\"\" return pulumi.get(self, \"is_client_affine\") @property @pulumi.getter(name=\"lockDuration\")", "resource. \"\"\" return pulumi.get(self, \"system_data\") @property @pulumi.getter def type(self) ->", "'name' to be a str\") pulumi.set(__self__, \"name\", name) if requires_session", "pulumi.set(__self__, \"forward_to\", forward_to) if id and not isinstance(id, str): raise", "and not isinstance(is_client_affine, bool): raise TypeError(\"Expected argument 'is_client_affine' to be", "pulumi.get(self, \"client_affine_properties\") @property @pulumi.getter(name=\"countDetails\") def count_details(self) -> 'outputs.MessageCountDetailsResponse': \"\"\" Message", "a str\") pulumi.set(__self__, \"forward_to\", forward_to) if id and not isinstance(id,", "if type and not isinstance(type, str): raise TypeError(\"Expected argument 'type'", "dead letter support when a message expires. \"\"\" return pulumi.get(self,", "GetSubscriptionResult: \"\"\" Description of subscription resource. \"\"\" def __init__(__self__, accessed_at=None,", "raise TypeError(\"Expected argument 'forward_dead_lettered_messages_to' to be a str\") pulumi.set(__self__, \"forward_dead_lettered_messages_to\",", "that indicates whether a subscription has dead letter support on", "messaging entity. 
\"\"\" return pulumi.get(self, \"status\") @property @pulumi.getter(name=\"systemData\") def system_data(self)", "pulumi.get(self, \"count_details\") @property @pulumi.getter(name=\"createdAt\") def created_at(self) -> str: \"\"\" Exact", "not isinstance(lock_duration, str): raise TypeError(\"Expected argument 'lock_duration' to be a", "\"\"\" return pulumi.get(self, \"created_at\") @property @pulumi.getter(name=\"deadLetteringOnFilterEvaluationExceptions\") def dead_lettering_on_filter_evaluation_exceptions(self) -> Optional[bool]:", "\"\"\" return pulumi.get(self, \"lock_duration\") @property @pulumi.getter(name=\"maxDeliveryCount\") def max_delivery_count(self) -> Optional[int]:", "not isinstance(system_data, dict): raise TypeError(\"Expected argument 'system_data' to be a", "be a str\") pulumi.set(__self__, \"duplicate_detection_history_time_window\", duplicate_detection_history_time_window) if enable_batched_operations and not", "be a str\") pulumi.set(__self__, \"default_message_time_to_live\", default_message_time_to_live) if duplicate_detection_history_time_window and not", "to be a bool\") pulumi.set(__self__, \"enable_batched_operations\", enable_batched_operations) if forward_dead_lettered_messages_to and", "bool\") pulumi.set(__self__, \"is_client_affine\", is_client_affine) if lock_duration and not isinstance(lock_duration, str):", "if max_delivery_count and not isinstance(max_delivery_count, int): raise TypeError(\"Expected argument 'max_delivery_count'", "'requires_session' to be a bool\") pulumi.set(__self__, \"requires_session\", requires_session) if status", "values for the status of a messaging entity. \"\"\" return", "auto_delete_on_idle=self.auto_delete_on_idle, client_affine_properties=self.client_affine_properties, count_details=self.count_details, created_at=self.created_at, dead_lettering_on_filter_evaluation_exceptions=self.dead_lettering_on_filter_evaluation_exceptions, dead_lettering_on_message_expiration=self.dead_lettering_on_message_expiration, default_message_time_to_live=self.default_message_time_to_live, duplicate_detection_history_time_window=self.duplicate_detection_history_time_window, enable_batched_operations=self.enable_batched_operations, forward_dead_lettered_messages_to=self.forward_dead_lettered_messages_to,", "dead_lettering_on_message_expiration=self.dead_lettering_on_message_expiration, default_message_time_to_live=self.default_message_time_to_live, duplicate_detection_history_time_window=self.duplicate_detection_history_time_window, enable_batched_operations=self.enable_batched_operations, forward_dead_lettered_messages_to=self.forward_dead_lettered_messages_to, forward_to=self.forward_to, id=self.id, is_client_affine=self.is_client_affine, lock_duration=self.lock_duration, max_delivery_count=self.max_delivery_count,", "max_delivery_count=self.max_delivery_count, message_count=self.message_count, name=self.name, requires_session=self.requires_session, status=self.status, system_data=self.system_data, type=self.type, updated_at=self.updated_at) def get_subscription(namespace_name:", "\"is_client_affine\") @property @pulumi.getter(name=\"lockDuration\") def lock_duration(self) -> Optional[str]: \"\"\" ISO 8061", "return pulumi.get(self, \"system_data\") @property @pulumi.getter def type(self) -> str: \"\"\"", "to be a bool\") pulumi.set(__self__, \"dead_lettering_on_filter_evaluation_exceptions\", dead_lettering_on_filter_evaluation_exceptions) if dead_lettering_on_message_expiration and", "raise TypeError(\"Expected argument 'requires_session' 
to be a bool\") pulumi.set(__self__, \"requires_session\",", "8061 timeSpan idle interval after which the topic is automatically", "This is the duration after which the message expires, starting", "= namespace_name __args__['resourceGroupName'] = resource_group_name __args__['subscriptionName'] = subscription_name __args__['topicName'] =", "Value indicating if a subscription supports the concept of sessions.", "\"created_at\", created_at) if dead_lettering_on_filter_evaluation_exceptions and not isinstance(dead_lettering_on_filter_evaluation_exceptions, bool): raise TypeError(\"Expected", "isinstance(client_affine_properties, dict): raise TypeError(\"Expected argument 'client_affine_properties' to be a dict\")", "Mapping, Optional, Sequence, Union, overload from ... import _utilities from", "and not isinstance(forward_dead_lettered_messages_to, str): raise TypeError(\"Expected argument 'forward_dead_lettered_messages_to' to be", "deliveries. \"\"\" return pulumi.get(self, \"max_delivery_count\") @property @pulumi.getter(name=\"messageCount\") def message_count(self) ->", "GetSubscriptionResult( accessed_at=self.accessed_at, auto_delete_on_idle=self.auto_delete_on_idle, client_affine_properties=self.client_affine_properties, count_details=self.count_details, created_at=self.created_at, dead_lettering_on_filter_evaluation_exceptions=self.dead_lettering_on_filter_evaluation_exceptions, dead_lettering_on_message_expiration=self.dead_lettering_on_message_expiration, default_message_time_to_live=self.default_message_time_to_live, duplicate_detection_history_time_window=self.duplicate_detection_history_time_window,", "TypeError(\"Expected argument 'dead_lettering_on_filter_evaluation_exceptions' to be a bool\") pulumi.set(__self__, \"dead_lettering_on_filter_evaluation_exceptions\", dead_lettering_on_filter_evaluation_exceptions)", "pulumi.set(__self__, \"dead_lettering_on_message_expiration\", dead_lettering_on_message_expiration) if default_message_time_to_live and not isinstance(default_message_time_to_live, str): raise", "namespace_name __args__['resourceGroupName'] = resource_group_name __args__['subscriptionName'] = subscription_name __args__['topicName'] = topic_name", "Name of the Resource group within the Azure subscription. 
:param", "count_details=__ret__.count_details, created_at=__ret__.created_at, dead_lettering_on_filter_evaluation_exceptions=__ret__.dead_lettering_on_filter_evaluation_exceptions, dead_lettering_on_message_expiration=__ret__.dead_lettering_on_message_expiration, default_message_time_to_live=__ret__.default_message_time_to_live, duplicate_detection_history_time_window=__ret__.duplicate_detection_history_time_window, enable_batched_operations=__ret__.enable_batched_operations, forward_dead_lettered_messages_to=__ret__.forward_dead_lettered_messages_to, forward_to=__ret__.forward_to, id=__ret__.id,", "from typing import Any, Mapping, Optional, Sequence, Union, overload from", "\"\"\" return pulumi.get(self, \"client_affine_properties\") @property @pulumi.getter(name=\"countDetails\") def count_details(self) -> 'outputs.MessageCountDetailsResponse':", "if enable_batched_operations and not isinstance(enable_batched_operations, bool): raise TypeError(\"Expected argument 'enable_batched_operations'", "and not isinstance(duplicate_detection_history_time_window, str): raise TypeError(\"Expected argument 'duplicate_detection_history_time_window' to be", "pulumi.get(self, \"dead_lettering_on_message_expiration\") @property @pulumi.getter(name=\"defaultMessageTimeToLive\") def default_message_time_to_live(self) -> Optional[str]: \"\"\" ISO", "raise TypeError(\"Expected argument 'client_affine_properties' to be a dict\") pulumi.set(__self__, \"client_affine_properties\",", "a str\") pulumi.set(__self__, \"updated_at\", updated_at) @property @pulumi.getter(name=\"accessedAt\") def accessed_at(self) ->", "TypeError(\"Expected argument 'accessed_at' to be a str\") pulumi.set(__self__, \"accessed_at\", accessed_at)", "None, topic_name: Optional[str] = None, opts: Optional[pulumi.InvokeOptions] = None) ->", "TypeError(\"Expected argument 'duplicate_detection_history_time_window' to be a str\") pulumi.set(__self__, \"duplicate_detection_history_time_window\", duplicate_detection_history_time_window)", "is_client_affine and not isinstance(is_client_affine, bool): raise TypeError(\"Expected argument 'is_client_affine' to", "the client id. \"\"\" return pulumi.get(self, \"is_client_affine\") @property @pulumi.getter(name=\"lockDuration\") def", "WARNING: this file was generated by the Pulumi SDK Generator.", "expires. \"\"\" return pulumi.get(self, \"dead_lettering_on_message_expiration\") @property @pulumi.getter(name=\"defaultMessageTimeToLive\") def default_message_time_to_live(self) ->", ":param str namespace_name: The namespace name :param str resource_group_name: Name", "import Any, Mapping, Optional, Sequence, Union, overload from ... import", "know what you are doing! 
*** import warnings import pulumi", "not isinstance(type, str): raise TypeError(\"Expected argument 'type' to be a", "def count_details(self) -> 'outputs.MessageCountDetailsResponse': \"\"\" Message count details \"\"\" return", "return pulumi.get(self, \"max_delivery_count\") @property @pulumi.getter(name=\"messageCount\") def message_count(self) -> float: \"\"\"", "count_details=None, created_at=None, dead_lettering_on_filter_evaluation_exceptions=None, dead_lettering_on_message_expiration=None, default_message_time_to_live=None, duplicate_detection_history_time_window=None, enable_batched_operations=None, forward_dead_lettered_messages_to=None, forward_to=None, id=None,", "a str\") pulumi.set(__self__, \"id\", id) if is_client_affine and not isinstance(is_client_affine,", "bool): raise TypeError(\"Expected argument 'is_client_affine' to be a bool\") pulumi.set(__self__,", "certain you know what you are doing! *** import warnings", "if auto_delete_on_idle and not isinstance(auto_delete_on_idle, str): raise TypeError(\"Expected argument 'auto_delete_on_idle'", "def default_message_time_to_live(self) -> Optional[str]: \"\"\" ISO 8061 Default message timespan", "isinstance(type, str): raise TypeError(\"Expected argument 'type' to be a str\")", "lock duration timespan for the subscription. The default value is", "Optional[str] = None, subscription_name: Optional[str] = None, topic_name: Optional[str] =", "message_count) if name and not isinstance(name, str): raise TypeError(\"Expected argument", "__args__ = dict() __args__['namespaceName'] = namespace_name __args__['resourceGroupName'] = resource_group_name __args__['subscriptionName']", "-> 'outputs.SystemDataResponse': \"\"\" The system meta data relating to this", "return pulumi.get(self, \"client_affine_properties\") @property @pulumi.getter(name=\"countDetails\") def count_details(self) -> 'outputs.MessageCountDetailsResponse': \"\"\"", "resource. \"\"\" def __init__(__self__, accessed_at=None, auto_delete_on_idle=None, client_affine_properties=None, count_details=None, created_at=None, dead_lettering_on_filter_evaluation_exceptions=None,", "Number of messages. \"\"\" return pulumi.get(self, \"message_count\") @property @pulumi.getter def", "the subscription. The default value is 1 minute. \"\"\" return", "you're certain you know what you are doing! *** import", "id) if is_client_affine and not isinstance(is_client_affine, bool): raise TypeError(\"Expected argument", "client_affine_properties) if count_details and not isinstance(count_details, dict): raise TypeError(\"Expected argument", "to be a str\") pulumi.set(__self__, \"updated_at\", updated_at) @property @pulumi.getter(name=\"accessedAt\") def", "coding=utf-8 # *** WARNING: this file was generated by the", "system_data) if type and not isinstance(type, str): raise TypeError(\"Expected argument", "Optional['outputs.SBClientAffinePropertiesResponse']: \"\"\" Properties specific to client affine subscriptions. \"\"\" return", "\"\"\" Value that indicates whether server-side batched operations are enabled.", "type(self) -> str: \"\"\" Resource type \"\"\" return pulumi.get(self, \"type\")", "updated_at) @property @pulumi.getter(name=\"accessedAt\") def accessed_at(self) -> str: \"\"\" Last time", "id. 
\"\"\" return pulumi.get(self, \"is_client_affine\") @property @pulumi.getter(name=\"lockDuration\") def lock_duration(self) ->", "\"requires_session\") @property @pulumi.getter def status(self) -> Optional[str]: \"\"\" Enumerates the", "auto_delete_on_idle(self) -> Optional[str]: \"\"\" ISO 8061 timeSpan idle interval after", "pulumi.set(__self__, \"system_data\", system_data) if type and not isinstance(type, str): raise", "timeSpan idle interval after which the topic is automatically deleted.", "subscription resource. :param str namespace_name: The namespace name :param str", "The topic name. \"\"\" __args__ = dict() __args__['namespaceName'] = namespace_name", "operations are enabled. \"\"\" return pulumi.get(self, \"enable_batched_operations\") @property @pulumi.getter(name=\"forwardDeadLetteredMessagesTo\") def", "argument 'client_affine_properties' to be a dict\") pulumi.set(__self__, \"client_affine_properties\", client_affine_properties) if", "name to forward the messages \"\"\" return pulumi.get(self, \"forward_to\") @property", "evaluation exceptions. \"\"\" return pulumi.get(self, \"dead_lettering_on_filter_evaluation_exceptions\") @property @pulumi.getter(name=\"deadLetteringOnMessageExpiration\") def dead_lettering_on_message_expiration(self)", "TypeError(\"Expected argument 'client_affine_properties' to be a dict\") pulumi.set(__self__, \"client_affine_properties\", client_affine_properties)", "@property @pulumi.getter(name=\"accessedAt\") def accessed_at(self) -> str: \"\"\" Last time there", "pulumi.get(self, \"accessed_at\") @property @pulumi.getter(name=\"autoDeleteOnIdle\") def auto_delete_on_idle(self) -> Optional[str]: \"\"\" ISO", "maximum deliveries. \"\"\" return pulumi.get(self, \"max_delivery_count\") @property @pulumi.getter(name=\"messageCount\") def message_count(self)", "indicates whether a subscription has dead letter support when a", "the concept of sessions. \"\"\" return pulumi.get(self, \"requires_session\") @property @pulumi.getter", "The exact time the message was updated. \"\"\" return pulumi.get(self,", "file was generated by the Pulumi SDK Generator. *** #", "pulumi.set(__self__, \"max_delivery_count\", max_delivery_count) if message_count and not isinstance(message_count, float): raise", "TypeError(\"Expected argument 'status' to be a str\") pulumi.set(__self__, \"status\", status)", "to be a str\") pulumi.set(__self__, \"type\", type) if updated_at and", "\"\"\" Description of subscription resource. 
:param str namespace_name: The namespace", "lock_duration) if max_delivery_count and not isinstance(max_delivery_count, int): raise TypeError(\"Expected argument", "not edit by hand unless you're certain you know what", "not isinstance(updated_at, str): raise TypeError(\"Expected argument 'updated_at' to be a", "def duplicate_detection_history_time_window(self) -> Optional[str]: \"\"\" ISO 8601 timeSpan structure that", "dead_lettering_on_filter_evaluation_exceptions and not isinstance(dead_lettering_on_filter_evaluation_exceptions, bool): raise TypeError(\"Expected argument 'dead_lettering_on_filter_evaluation_exceptions' to", "accessed_at=__ret__.accessed_at, auto_delete_on_idle=__ret__.auto_delete_on_idle, client_affine_properties=__ret__.client_affine_properties, count_details=__ret__.count_details, created_at=__ret__.created_at, dead_lettering_on_filter_evaluation_exceptions=__ret__.dead_lettering_on_filter_evaluation_exceptions, dead_lettering_on_message_expiration=__ret__.dead_lettering_on_message_expiration, default_message_time_to_live=__ret__.default_message_time_to_live, duplicate_detection_history_time_window=__ret__.duplicate_detection_history_time_window, enable_batched_operations=__ret__.enable_batched_operations,", "Optional[int]: \"\"\" Number of maximum deliveries. \"\"\" return pulumi.get(self, \"max_delivery_count\")", "the message expires, starting from when the message is sent", "str: \"\"\" Resource type \"\"\" return pulumi.get(self, \"type\") @property @pulumi.getter(name=\"updatedAt\")", "auto_delete_on_idle and not isinstance(auto_delete_on_idle, str): raise TypeError(\"Expected argument 'auto_delete_on_idle' to", "to be a str\") pulumi.set(__self__, \"forward_dead_lettered_messages_to\", forward_dead_lettered_messages_to) if forward_to and", "pulumi.get(self, \"requires_session\") @property @pulumi.getter def status(self) -> Optional[str]: \"\"\" Enumerates", "if updated_at and not isinstance(updated_at, str): raise TypeError(\"Expected argument 'updated_at'", "and not isinstance(id, str): raise TypeError(\"Expected argument 'id' to be", "minutes. \"\"\" return pulumi.get(self, \"duplicate_detection_history_time_window\") @property @pulumi.getter(name=\"enableBatchedOperations\") def enable_batched_operations(self) ->", "\"dead_lettering_on_message_expiration\") @property @pulumi.getter(name=\"defaultMessageTimeToLive\") def default_message_time_to_live(self) -> Optional[str]: \"\"\" ISO 8061", "value. 
This is the duration after which the message expires,", "forward_dead_lettered_messages_to(self) -> Optional[str]: \"\"\" Queue/Topic name to forward the Dead", "a str\") pulumi.set(__self__, \"auto_delete_on_idle\", auto_delete_on_idle) if client_affine_properties and not isinstance(client_affine_properties,", "pulumi.set(__self__, \"duplicate_detection_history_time_window\", duplicate_detection_history_time_window) if enable_batched_operations and not isinstance(enable_batched_operations, bool): raise", "str\") pulumi.set(__self__, \"lock_duration\", lock_duration) if max_delivery_count and not isinstance(max_delivery_count, int):", "@pulumi.getter(name=\"requiresSession\") def requires_session(self) -> Optional[bool]: \"\"\" Value indicating if a", "messages \"\"\" return pulumi.get(self, \"forward_to\") @property @pulumi.getter def id(self) ->", "# *** WARNING: this file was generated by the Pulumi", "@pulumi.getter(name=\"isClientAffine\") def is_client_affine(self) -> Optional[bool]: \"\"\" Value that indicates whether", "dict() __args__['namespaceName'] = namespace_name __args__['resourceGroupName'] = resource_group_name __args__['subscriptionName'] = subscription_name", "\"id\") @property @pulumi.getter(name=\"isClientAffine\") def is_client_affine(self) -> Optional[bool]: \"\"\" Value that", "was generated by the Pulumi SDK Generator. *** # ***", "not isinstance(auto_delete_on_idle, str): raise TypeError(\"Expected argument 'auto_delete_on_idle' to be a", "dead_lettering_on_filter_evaluation_exceptions(self) -> Optional[bool]: \"\"\" Value that indicates whether a subscription", "'is_client_affine' to be a bool\") pulumi.set(__self__, \"is_client_affine\", is_client_affine) if lock_duration", "to be a float\") pulumi.set(__self__, \"message_count\", message_count) if name and", "has dead letter support when a message expires. \"\"\" return", "\"\"\" return pulumi.get(self, \"message_count\") @property @pulumi.getter def name(self) -> str:", "ISO 8601 timeSpan structure that defines the duration of the", "by hand unless you're certain you know what you are", "\"\"\" Queue/Topic name to forward the messages \"\"\" return pulumi.get(self,", "idle interval after which the topic is automatically deleted. 
The", "return pulumi.get(self, \"forward_to\") @property @pulumi.getter def id(self) -> str: \"\"\"", "str): raise TypeError(\"Expected argument 'duplicate_detection_history_time_window' to be a str\") pulumi.set(__self__,", "'GetSubscriptionResult', 'AwaitableGetSubscriptionResult', 'get_subscription', ] @pulumi.output_type class GetSubscriptionResult: \"\"\" Description of", "Optional[bool]: \"\"\" Value that indicates whether server-side batched operations are", "details \"\"\" return pulumi.get(self, \"count_details\") @property @pulumi.getter(name=\"createdAt\") def created_at(self) ->", "def __await__(self): if False: yield self return GetSubscriptionResult( accessed_at=self.accessed_at, auto_delete_on_idle=self.auto_delete_on_idle,", "TypeError(\"Expected argument 'auto_delete_on_idle' to be a str\") pulumi.set(__self__, \"auto_delete_on_idle\", auto_delete_on_idle)", "be a bool\") pulumi.set(__self__, \"requires_session\", requires_session) if status and not", "message_count=self.message_count, name=self.name, requires_session=self.requires_session, status=self.status, system_data=self.system_data, type=self.type, updated_at=self.updated_at) def get_subscription(namespace_name: Optional[str]", "argument 'count_details' to be a dict\") pulumi.set(__self__, \"count_details\", count_details) if", "\"\"\" Description of subscription resource. \"\"\" def __init__(__self__, accessed_at=None, auto_delete_on_idle=None,", "if duplicate_detection_history_time_window and not isinstance(duplicate_detection_history_time_window, str): raise TypeError(\"Expected argument 'duplicate_detection_history_time_window'", "name(self) -> str: \"\"\" Resource name \"\"\" return pulumi.get(self, \"name\")", "requires_session) if status and not isinstance(status, str): raise TypeError(\"Expected argument", "lock_duration=self.lock_duration, max_delivery_count=self.max_delivery_count, message_count=self.message_count, name=self.name, requires_session=self.requires_session, status=self.status, system_data=self.system_data, type=self.type, updated_at=self.updated_at) def", "be a str\") pulumi.set(__self__, \"status\", status) if system_data and not", "pulumi.set(__self__, \"status\", status) if system_data and not isinstance(system_data, dict): raise", "\"\"\" return pulumi.get(self, \"default_message_time_to_live\") @property @pulumi.getter(name=\"duplicateDetectionHistoryTimeWindow\") def duplicate_detection_history_time_window(self) -> Optional[str]:", "\"\"\" Value indicating if a subscription supports the concept of", "created_at=__ret__.created_at, dead_lettering_on_filter_evaluation_exceptions=__ret__.dead_lettering_on_filter_evaluation_exceptions, dead_lettering_on_message_expiration=__ret__.dead_lettering_on_message_expiration, default_message_time_to_live=__ret__.default_message_time_to_live, duplicate_detection_history_time_window=__ret__.duplicate_detection_history_time_window, enable_batched_operations=__ret__.enable_batched_operations, forward_dead_lettered_messages_to=__ret__.forward_dead_lettered_messages_to, forward_to=__ret__.forward_to, id=__ret__.id, is_client_affine=__ret__.is_client_affine,", "default_message_time_to_live=None, duplicate_detection_history_time_window=None, enable_batched_operations=None, forward_dead_lettered_messages_to=None, forward_to=None, id=None, is_client_affine=None, lock_duration=None, max_delivery_count=None, message_count=None,", "system_data=None, type=None, updated_at=None): if accessed_at and not isinstance(accessed_at, str): raise", "str): raise 
TypeError(\"Expected argument 'lock_duration' to be a str\") pulumi.set(__self__,", "affine subscriptions. \"\"\" return pulumi.get(self, \"client_affine_properties\") @property @pulumi.getter(name=\"countDetails\") def count_details(self)", "\"accessed_at\", accessed_at) if auto_delete_on_idle and not isinstance(auto_delete_on_idle, str): raise TypeError(\"Expected", "@pulumi.getter def type(self) -> str: \"\"\" Resource type \"\"\" return", "enable_batched_operations=__ret__.enable_batched_operations, forward_dead_lettered_messages_to=__ret__.forward_dead_lettered_messages_to, forward_to=__ret__.forward_to, id=__ret__.id, is_client_affine=__ret__.is_client_affine, lock_duration=__ret__.lock_duration, max_delivery_count=__ret__.max_delivery_count, message_count=__ret__.message_count, name=__ret__.name, requires_session=__ret__.requires_session,", "return pulumi.get(self, \"lock_duration\") @property @pulumi.getter(name=\"maxDeliveryCount\") def max_delivery_count(self) -> Optional[int]: \"\"\"", "value is 10 minutes. \"\"\" return pulumi.get(self, \"duplicate_detection_history_time_window\") @property @pulumi.getter(name=\"enableBatchedOperations\")", "raise TypeError(\"Expected argument 'accessed_at' to be a str\") pulumi.set(__self__, \"accessed_at\",", "status(self) -> Optional[str]: \"\"\" Enumerates the possible values for the", "a bool\") pulumi.set(__self__, \"enable_batched_operations\", enable_batched_operations) if forward_dead_lettered_messages_to and not isinstance(forward_dead_lettered_messages_to,", "str\") pulumi.set(__self__, \"updated_at\", updated_at) @property @pulumi.getter(name=\"accessedAt\") def accessed_at(self) -> str:", "is 10 minutes. \"\"\" return pulumi.get(self, \"duplicate_detection_history_time_window\") @property @pulumi.getter(name=\"enableBatchedOperations\") def", "\"forward_to\") @property @pulumi.getter def id(self) -> str: \"\"\" Resource Id", "you are doing! *** import warnings import pulumi import pulumi.runtime", "\"\"\" return pulumi.get(self, \"max_delivery_count\") @property @pulumi.getter(name=\"messageCount\") def message_count(self) -> float:", "raise TypeError(\"Expected argument 'created_at' to be a str\") pulumi.set(__self__, \"created_at\",", "a subscription has dead letter support when a message expires.", "return pulumi.get(self, \"name\") @property @pulumi.getter(name=\"requiresSession\") def requires_session(self) -> Optional[bool]: \"\"\"", "forward_to and not isinstance(forward_to, str): raise TypeError(\"Expected argument 'forward_to' to", "TypeError(\"Expected argument 'forward_to' to be a str\") pulumi.set(__self__, \"forward_to\", forward_to)", "argument 'forward_to' to be a str\") pulumi.set(__self__, \"forward_to\", forward_to) if", "AwaitableGetSubscriptionResult: \"\"\" Description of subscription resource. :param str namespace_name: The", "str subscription_name: The subscription name. :param str topic_name: The topic", "indicates whether server-side batched operations are enabled. \"\"\" return pulumi.get(self,", "def id(self) -> str: \"\"\" Resource Id \"\"\" return pulumi.get(self,", "count details \"\"\" return pulumi.get(self, \"count_details\") @property @pulumi.getter(name=\"createdAt\") def created_at(self)", "of maximum deliveries. \"\"\" return pulumi.get(self, \"max_delivery_count\") @property @pulumi.getter(name=\"messageCount\") def", "8061 Default message timespan to live value. 
This is the", "TypeError(\"Expected argument 'id' to be a str\") pulumi.set(__self__, \"id\", id)", "if status and not isinstance(status, str): raise TypeError(\"Expected argument 'status'", "@pulumi.getter(name=\"defaultMessageTimeToLive\") def default_message_time_to_live(self) -> Optional[str]: \"\"\" ISO 8061 Default message", "-> Optional[str]: \"\"\" ISO 8061 lock duration timespan for the", "str: \"\"\" Resource Id \"\"\" return pulumi.get(self, \"id\") @property @pulumi.getter(name=\"isClientAffine\")", "that indicates whether a subscription has dead letter support when", "id=self.id, is_client_affine=self.is_client_affine, lock_duration=self.lock_duration, max_delivery_count=self.max_delivery_count, message_count=self.message_count, name=self.name, requires_session=self.requires_session, status=self.status, system_data=self.system_data, type=self.type,", "def auto_delete_on_idle(self) -> Optional[str]: \"\"\" ISO 8061 timeSpan idle interval", "\"forward_dead_lettered_messages_to\", forward_dead_lettered_messages_to) if forward_to and not isinstance(forward_to, str): raise TypeError(\"Expected", "itself. \"\"\" return pulumi.get(self, \"default_message_time_to_live\") @property @pulumi.getter(name=\"duplicateDetectionHistoryTimeWindow\") def duplicate_detection_history_time_window(self) ->", "'system_data' to be a dict\") pulumi.set(__self__, \"system_data\", system_data) if type", "client_affine_properties=self.client_affine_properties, count_details=self.count_details, created_at=self.created_at, dead_lettering_on_filter_evaluation_exceptions=self.dead_lettering_on_filter_evaluation_exceptions, dead_lettering_on_message_expiration=self.dead_lettering_on_message_expiration, default_message_time_to_live=self.default_message_time_to_live, duplicate_detection_history_time_window=self.duplicate_detection_history_time_window, enable_batched_operations=self.enable_batched_operations, forward_dead_lettered_messages_to=self.forward_dead_lettered_messages_to, forward_to=self.forward_to,", "to be a str\") pulumi.set(__self__, \"id\", id) if is_client_affine and", "str): raise TypeError(\"Expected argument 'auto_delete_on_idle' to be a str\") pulumi.set(__self__,", "a str\") pulumi.set(__self__, \"accessed_at\", accessed_at) if auto_delete_on_idle and not isinstance(auto_delete_on_idle,", "is_client_affine(self) -> Optional[bool]: \"\"\" Value that indicates whether the subscription", "def name(self) -> str: \"\"\" Resource name \"\"\" return pulumi.get(self,", "def type(self) -> str: \"\"\" Resource type \"\"\" return pulumi.get(self,", "of the duplicate detection history. 
The default value is 10", "pulumi.get(self, \"name\") @property @pulumi.getter(name=\"requiresSession\") def requires_session(self) -> Optional[bool]: \"\"\" Value", "\"name\") @property @pulumi.getter(name=\"requiresSession\") def requires_session(self) -> Optional[bool]: \"\"\" Value indicating", "bool): raise TypeError(\"Expected argument 'requires_session' to be a bool\") pulumi.set(__self__,", "argument 'forward_dead_lettered_messages_to' to be a str\") pulumi.set(__self__, \"forward_dead_lettered_messages_to\", forward_dead_lettered_messages_to) if", "a str\") pulumi.set(__self__, \"lock_duration\", lock_duration) if max_delivery_count and not isinstance(max_delivery_count,", "\"duplicate_detection_history_time_window\") @property @pulumi.getter(name=\"enableBatchedOperations\") def enable_batched_operations(self) -> Optional[bool]: \"\"\" Value that", "created_at=None, dead_lettering_on_filter_evaluation_exceptions=None, dead_lettering_on_message_expiration=None, default_message_time_to_live=None, duplicate_detection_history_time_window=None, enable_batched_operations=None, forward_dead_lettered_messages_to=None, forward_to=None, id=None, is_client_affine=None,", "def status(self) -> Optional[str]: \"\"\" Enumerates the possible values for", "class AwaitableGetSubscriptionResult(GetSubscriptionResult): # pylint: disable=using-constant-test def __await__(self): if False: yield", "Value that indicates whether a subscription has dead letter support", "lock_duration(self) -> Optional[str]: \"\"\" ISO 8061 lock duration timespan for", "pulumi.set(__self__, \"auto_delete_on_idle\", auto_delete_on_idle) if client_affine_properties and not isinstance(client_affine_properties, dict): raise", "\"\"\" return pulumi.get(self, \"requires_session\") @property @pulumi.getter def status(self) -> Optional[str]:", "duplicate_detection_history_time_window=__ret__.duplicate_detection_history_time_window, enable_batched_operations=__ret__.enable_batched_operations, forward_dead_lettered_messages_to=__ret__.forward_dead_lettered_messages_to, forward_to=__ret__.forward_to, id=__ret__.id, is_client_affine=__ret__.is_client_affine, lock_duration=__ret__.lock_duration, max_delivery_count=__ret__.max_delivery_count, message_count=__ret__.message_count, name=__ret__.name,", "'outputs.MessageCountDetailsResponse': \"\"\" Message count details \"\"\" return pulumi.get(self, \"count_details\") @property", "str\") pulumi.set(__self__, \"created_at\", created_at) if dead_lettering_on_filter_evaluation_exceptions and not isinstance(dead_lettering_on_filter_evaluation_exceptions, bool):", "8601 timeSpan structure that defines the duration of the duplicate", "updated_at=None): if accessed_at and not isinstance(accessed_at, str): raise TypeError(\"Expected argument", "if client_affine_properties and not isinstance(client_affine_properties, dict): raise TypeError(\"Expected argument 'client_affine_properties'", "pulumi.set(__self__, \"dead_lettering_on_filter_evaluation_exceptions\", dead_lettering_on_filter_evaluation_exceptions) if dead_lettering_on_message_expiration and not isinstance(dead_lettering_on_message_expiration, bool): raise", "count_details(self) -> 'outputs.MessageCountDetailsResponse': \"\"\" Message count details \"\"\" return pulumi.get(self,", "def forward_dead_lettered_messages_to(self) -> Optional[str]: \"\"\" Queue/Topic name to forward the", "pylint: disable=using-constant-test def __await__(self): if False: yield self return GetSubscriptionResult(", "if is_client_affine and not 
isinstance(is_client_affine, bool): raise TypeError(\"Expected argument 'is_client_affine'", "str\") pulumi.set(__self__, \"status\", status) if system_data and not isinstance(system_data, dict):", "TimeToLive is not set on a message itself. \"\"\" return", "return pulumi.get(self, \"default_message_time_to_live\") @property @pulumi.getter(name=\"duplicateDetectionHistoryTimeWindow\") def duplicate_detection_history_time_window(self) -> Optional[str]: \"\"\"", "\"duplicate_detection_history_time_window\", duplicate_detection_history_time_window) if enable_batched_operations and not isinstance(enable_batched_operations, bool): raise TypeError(\"Expected", "str\") pulumi.set(__self__, \"forward_dead_lettered_messages_to\", forward_dead_lettered_messages_to) if forward_to and not isinstance(forward_to, str):", "accessed_at(self) -> str: \"\"\" Last time there was a receive", "str namespace_name: The namespace name :param str resource_group_name: Name of", "subscriptions. \"\"\" return pulumi.get(self, \"client_affine_properties\") @property @pulumi.getter(name=\"countDetails\") def count_details(self) ->", "and not isinstance(forward_to, str): raise TypeError(\"Expected argument 'forward_to' to be", "not isinstance(accessed_at, str): raise TypeError(\"Expected argument 'accessed_at' to be a", "TypeError(\"Expected argument 'default_message_time_to_live' to be a str\") pulumi.set(__self__, \"default_message_time_to_live\", default_message_time_to_live)", "pulumi.set(__self__, \"updated_at\", updated_at) @property @pulumi.getter(name=\"accessedAt\") def accessed_at(self) -> str: \"\"\"", "\"forward_dead_lettered_messages_to\") @property @pulumi.getter(name=\"forwardTo\") def forward_to(self) -> Optional[str]: \"\"\" Queue/Topic name", "id(self) -> str: \"\"\" Resource Id \"\"\" return pulumi.get(self, \"id\")", "str): raise TypeError(\"Expected argument 'id' to be a str\") pulumi.set(__self__,", "to be a str\") pulumi.set(__self__, \"name\", name) if requires_session and", "default value is 10 minutes. \"\"\" return pulumi.get(self, \"duplicate_detection_history_time_window\") @property", "Optional[str]: \"\"\" Enumerates the possible values for the status of", "is 5 minutes. \"\"\" return pulumi.get(self, \"auto_delete_on_idle\") @property @pulumi.getter(name=\"clientAffineProperties\") def", "subscription supports the concept of sessions. \"\"\" return pulumi.get(self, \"requires_session\")", "a str\") pulumi.set(__self__, \"duplicate_detection_history_time_window\", duplicate_detection_history_time_window) if enable_batched_operations and not isinstance(enable_batched_operations,", "resource_group_name: Name of the Resource group within the Azure subscription.", "be a int\") pulumi.set(__self__, \"max_delivery_count\", max_delivery_count) if message_count and not", "client_affine_properties=None, count_details=None, created_at=None, dead_lettering_on_filter_evaluation_exceptions=None, dead_lettering_on_message_expiration=None, default_message_time_to_live=None, duplicate_detection_history_time_window=None, enable_batched_operations=None, forward_dead_lettered_messages_to=None, forward_to=None,", "-> AwaitableGetSubscriptionResult: \"\"\" Description of subscription resource. :param str namespace_name:", "of the Resource group within the Azure subscription. :param str", "subscription_name: The subscription name. 
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***

import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs

__all__ = [
    'GetSubscriptionResult',
    'AwaitableGetSubscriptionResult',
    'get_subscription',
]

@pulumi.output_type
class GetSubscriptionResult:
    """
    Description of subscription resource.
    """
    def __init__(__self__, accessed_at=None, auto_delete_on_idle=None, client_affine_properties=None, count_details=None, created_at=None, dead_lettering_on_filter_evaluation_exceptions=None, dead_lettering_on_message_expiration=None, default_message_time_to_live=None, duplicate_detection_history_time_window=None, enable_batched_operations=None, forward_dead_lettered_messages_to=None, forward_to=None, id=None, is_client_affine=None, lock_duration=None, max_delivery_count=None, message_count=None, name=None, requires_session=None, status=None, system_data=None, type=None, updated_at=None):
        if accessed_at and not isinstance(accessed_at, str):
            raise TypeError("Expected argument 'accessed_at' to be a str")
        pulumi.set(__self__, "accessed_at", accessed_at)
        if auto_delete_on_idle and not isinstance(auto_delete_on_idle, str):
            raise TypeError("Expected argument 'auto_delete_on_idle' to be a str")
        pulumi.set(__self__, "auto_delete_on_idle", auto_delete_on_idle)
        if client_affine_properties and not isinstance(client_affine_properties, dict):
            raise TypeError("Expected argument 'client_affine_properties' to be a dict")
        pulumi.set(__self__, "client_affine_properties", client_affine_properties)
        if count_details and not isinstance(count_details, dict):
            raise TypeError("Expected argument 'count_details' to be a dict")
        pulumi.set(__self__, "count_details", count_details)
        if created_at and not isinstance(created_at, str):
            raise TypeError("Expected argument 'created_at' to be a str")
        pulumi.set(__self__, "created_at", created_at)
        if dead_lettering_on_filter_evaluation_exceptions and not isinstance(dead_lettering_on_filter_evaluation_exceptions, bool):
            raise TypeError("Expected argument 'dead_lettering_on_filter_evaluation_exceptions' to be a bool")
        pulumi.set(__self__, "dead_lettering_on_filter_evaluation_exceptions", dead_lettering_on_filter_evaluation_exceptions)
        if dead_lettering_on_message_expiration and not isinstance(dead_lettering_on_message_expiration, bool):
            raise TypeError("Expected argument 'dead_lettering_on_message_expiration' to be a bool")
        pulumi.set(__self__, "dead_lettering_on_message_expiration", dead_lettering_on_message_expiration)
        if default_message_time_to_live and not isinstance(default_message_time_to_live, str):
            raise TypeError("Expected argument 'default_message_time_to_live' to be a str")
        pulumi.set(__self__, "default_message_time_to_live", default_message_time_to_live)
        if duplicate_detection_history_time_window and not isinstance(duplicate_detection_history_time_window, str):
            raise TypeError("Expected argument 'duplicate_detection_history_time_window' to be a str")
        pulumi.set(__self__, "duplicate_detection_history_time_window", duplicate_detection_history_time_window)
        if enable_batched_operations and not isinstance(enable_batched_operations, bool):
            raise TypeError("Expected argument 'enable_batched_operations' to be a bool")
        pulumi.set(__self__, "enable_batched_operations", enable_batched_operations)
        if forward_dead_lettered_messages_to and not isinstance(forward_dead_lettered_messages_to, str):
            raise TypeError("Expected argument 'forward_dead_lettered_messages_to' to be a str")
        pulumi.set(__self__, "forward_dead_lettered_messages_to", forward_dead_lettered_messages_to)
        if forward_to and not isinstance(forward_to, str):
            raise TypeError("Expected argument 'forward_to' to be a str")
        pulumi.set(__self__, "forward_to", forward_to)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if is_client_affine and not isinstance(is_client_affine, bool):
            raise TypeError("Expected argument 'is_client_affine' to be a bool")
        pulumi.set(__self__, "is_client_affine", is_client_affine)
        if lock_duration and not isinstance(lock_duration, str):
            raise TypeError("Expected argument 'lock_duration' to be a str")
        pulumi.set(__self__, "lock_duration", lock_duration)
        if max_delivery_count and not isinstance(max_delivery_count, int):
            raise TypeError("Expected argument 'max_delivery_count' to be a int")
        pulumi.set(__self__, "max_delivery_count", max_delivery_count)
        if message_count and not isinstance(message_count, float):
            raise TypeError("Expected argument 'message_count' to be a float")
        pulumi.set(__self__, "message_count", message_count)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if requires_session and not isinstance(requires_session, bool):
            raise TypeError("Expected argument 'requires_session' to be a bool")
        pulumi.set(__self__, "requires_session", requires_session)
        if status and not isinstance(status, str):
            raise TypeError("Expected argument 'status' to be a str")
        pulumi.set(__self__, "status", status)
        if system_data and not isinstance(system_data, dict):
            raise TypeError("Expected argument 'system_data' to be a dict")
        pulumi.set(__self__, "system_data", system_data)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)
        if updated_at and not isinstance(updated_at, str):
            raise TypeError("Expected argument 'updated_at' to be a str")
        pulumi.set(__self__, "updated_at", updated_at)

    @property
    @pulumi.getter(name="accessedAt")
    def accessed_at(self) -> str:
        """
        Last time there was a receive request to this subscription.
        """
        return pulumi.get(self, "accessed_at")

    @property
    @pulumi.getter(name="autoDeleteOnIdle")
    def auto_delete_on_idle(self) -> Optional[str]:
        """
        ISO 8061 timeSpan idle interval after which the topic is automatically deleted. The minimum duration is 5 minutes.
        """
        return pulumi.get(self, "auto_delete_on_idle")

    @property
    @pulumi.getter(name="clientAffineProperties")
    def client_affine_properties(self) -> Optional['outputs.SBClientAffinePropertiesResponse']:
        """
        Properties specific to client affine subscriptions.
        """
        return pulumi.get(self, "client_affine_properties")

    @property
    @pulumi.getter(name="countDetails")
    def count_details(self) -> 'outputs.MessageCountDetailsResponse':
        """
        Message count details
        """
        return pulumi.get(self, "count_details")

    @property
    @pulumi.getter(name="createdAt")
    def created_at(self) -> str:
        """
        Exact time the message was created.
        """
        return pulumi.get(self, "created_at")

    @property
    @pulumi.getter(name="deadLetteringOnFilterEvaluationExceptions")
    def dead_lettering_on_filter_evaluation_exceptions(self) -> Optional[bool]:
        """
        Value that indicates whether a subscription has dead letter support on filter evaluation exceptions.
        """
        return pulumi.get(self, "dead_lettering_on_filter_evaluation_exceptions")

    @property
    @pulumi.getter(name="deadLetteringOnMessageExpiration")
    def dead_lettering_on_message_expiration(self) -> Optional[bool]:
        """
        Value that indicates whether a subscription has dead letter support when a message expires.
        """
        return pulumi.get(self, "dead_lettering_on_message_expiration")

    @property
    @pulumi.getter(name="defaultMessageTimeToLive")
    def default_message_time_to_live(self) -> Optional[str]:
        """
        ISO 8061 Default message timespan to live value. This is the duration after which the message expires, starting from when the message is sent to Service Bus. This is the default value used when TimeToLive is not set on a message itself.
        """
        return pulumi.get(self, "default_message_time_to_live")

    @property
    @pulumi.getter(name="duplicateDetectionHistoryTimeWindow")
    def duplicate_detection_history_time_window(self) -> Optional[str]:
        """
        ISO 8601 timeSpan structure that defines the duration of the duplicate detection history. The default value is 10 minutes.
        """
        return pulumi.get(self, "duplicate_detection_history_time_window")

    @property
    @pulumi.getter(name="enableBatchedOperations")
    def enable_batched_operations(self) -> Optional[bool]:
        """
        Value that indicates whether server-side batched operations are enabled.
        """
        return pulumi.get(self, "enable_batched_operations")

    @property
    @pulumi.getter(name="forwardDeadLetteredMessagesTo")
    def forward_dead_lettered_messages_to(self) -> Optional[str]:
        """
        Queue/Topic name to forward the Dead Letter message
        """
        return pulumi.get(self, "forward_dead_lettered_messages_to")

    @property
    @pulumi.getter(name="forwardTo")
    def forward_to(self) -> Optional[str]:
        """
        Queue/Topic name to forward the messages
        """
        return pulumi.get(self, "forward_to")

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Resource Id
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter(name="isClientAffine")
    def is_client_affine(self) -> Optional[bool]:
        """
        Value that indicates whether the subscription has an affinity to the client id.
        """
        return pulumi.get(self, "is_client_affine")

    @property
    @pulumi.getter(name="lockDuration")
    def lock_duration(self) -> Optional[str]:
        """
        ISO 8061 lock duration timespan for the subscription. The default value is 1 minute.
        """
        return pulumi.get(self, "lock_duration")

    @property
    @pulumi.getter(name="maxDeliveryCount")
    def max_delivery_count(self) -> Optional[int]:
        """
        Number of maximum deliveries.
        """
        return pulumi.get(self, "max_delivery_count")

    @property
    @pulumi.getter(name="messageCount")
    def message_count(self) -> float:
        """
        Number of messages.
        """
        return pulumi.get(self, "message_count")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Resource name
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="requiresSession")
    def requires_session(self) -> Optional[bool]:
        """
        Value indicating if a subscription supports the concept of sessions.
        """
        return pulumi.get(self, "requires_session")

    @property
    @pulumi.getter
    def status(self) -> Optional[str]:
        """
        Enumerates the possible values for the status of a messaging entity.
        """
        return pulumi.get(self, "status")

    @property
    @pulumi.getter(name="systemData")
    def system_data(self) -> 'outputs.SystemDataResponse':
        """
        The system meta data relating to this resource.
        """
        return pulumi.get(self, "system_data")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Resource type
        """
        return pulumi.get(self, "type")

    @property
    @pulumi.getter(name="updatedAt")
    def updated_at(self) -> str:
        """
        The exact time the message was updated.
        """
        return pulumi.get(self, "updated_at")


class AwaitableGetSubscriptionResult(GetSubscriptionResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return GetSubscriptionResult(
            accessed_at=self.accessed_at,
            auto_delete_on_idle=self.auto_delete_on_idle,
            client_affine_properties=self.client_affine_properties,
            count_details=self.count_details,
            created_at=self.created_at,
            dead_lettering_on_filter_evaluation_exceptions=self.dead_lettering_on_filter_evaluation_exceptions,
            dead_lettering_on_message_expiration=self.dead_lettering_on_message_expiration,
            default_message_time_to_live=self.default_message_time_to_live,
            duplicate_detection_history_time_window=self.duplicate_detection_history_time_window,
            enable_batched_operations=self.enable_batched_operations,
            forward_dead_lettered_messages_to=self.forward_dead_lettered_messages_to,
            forward_to=self.forward_to,
            id=self.id,
            is_client_affine=self.is_client_affine,
            lock_duration=self.lock_duration,
            max_delivery_count=self.max_delivery_count,
            message_count=self.message_count,
            name=self.name,
            requires_session=self.requires_session,
            status=self.status,
            system_data=self.system_data,
            type=self.type,
            updated_at=self.updated_at)


def get_subscription(namespace_name: Optional[str] = None,
                     resource_group_name: Optional[str] = None,
                     subscription_name: Optional[str] = None,
                     topic_name: Optional[str] = None,
                     opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSubscriptionResult:
    """
    Description of subscription resource.

    :param str namespace_name: The namespace name
    :param str resource_group_name: Name of the Resource group within the Azure subscription.
    :param str subscription_name: The subscription name.
    :param str topic_name: The topic name.
    """
    __args__ = dict()
    __args__['namespaceName'] = namespace_name
    __args__['resourceGroupName'] = resource_group_name
    __args__['subscriptionName'] = subscription_name
    __args__['topicName'] = topic_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('azure-native:servicebus/v20210601preview:getSubscription', __args__, opts=opts, typ=GetSubscriptionResult).value

    return AwaitableGetSubscriptionResult(
        accessed_at=__ret__.accessed_at,
        auto_delete_on_idle=__ret__.auto_delete_on_idle,
        client_affine_properties=__ret__.client_affine_properties,
        count_details=__ret__.count_details,
        created_at=__ret__.created_at,
        dead_lettering_on_filter_evaluation_exceptions=__ret__.dead_lettering_on_filter_evaluation_exceptions,
        dead_lettering_on_message_expiration=__ret__.dead_lettering_on_message_expiration,
        default_message_time_to_live=__ret__.default_message_time_to_live,
        duplicate_detection_history_time_window=__ret__.duplicate_detection_history_time_window,
        enable_batched_operations=__ret__.enable_batched_operations,
        forward_dead_lettered_messages_to=__ret__.forward_dead_lettered_messages_to,
        forward_to=__ret__.forward_to,
        id=__ret__.id,
        is_client_affine=__ret__.is_client_affine,
        lock_duration=__ret__.lock_duration,
        max_delivery_count=__ret__.max_delivery_count,
        message_count=__ret__.message_count,
        name=__ret__.name,
        requires_session=__ret__.requires_session,
        status=__ret__.status,
        system_data=__ret__.system_data,
        type=__ret__.type,
        updated_at=__ret__.updated_at)
[ "HPX # Start of horizontal sync pulse LCD_HSYNC0 = HFP", "= 20 # Horizontal Back Porch HFP = 10 #", "distribute this software, either in source code form or as", "1 here # Define the constants needed by the EVE", "using the largest possible line time in order to #", "Vertical Line Padding # FTDI needs at least 1 here", "Back Porch VFP = 4 # Vertical Front Porch VLP", "any and all copyright interest in the # software to", "all present and future rights to this # software under", "Padding # FTDI needs at least 1 here # Define", "VLP = 1 # Vertical Line Padding # FTDI needs", "possible line time in order to # maximize the time", "and by any # means. # In jurisdictions that recognize", "on the timing # Active width of LCD display LCD_WIDTH", "= 2 # Vertical Back Porch VFP = 4 #", "copyright interest in the # software to the public domain.", "KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO", "# Vertical timing VLH = 400 # Vertical Line Height", "400 # Vertical Line Height VS = 2 # Vertical", "PCLK frequencies. LCD_PCLK_CSPREAD = 0 #This is not a 24-bit", "copyright law. # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT", "to the public domain. We make this dedication for the", "Speed EVE_CLOCK_SPEED = 60000000 # Touch TOUCH_RESISTIVE = False TOUCH_CAPACITIVE", "pulse LCD_HSYNC1 = HFP+HSW # Start of active line LCD_HOFFSET", "Active height of LCD display LCD_HEIGHT = VLH # Start", "least 1 here # Define the constants needed by the", "# Pixel clock divisor LCD_PCLK = 5 #---------------------------------------------------------------------------- # Frame_Rate", "number of clocks per line LCD_HCYCLE = HPX+HFP+HSW+HBP+HPP #---------------------------------------------------------------------------- #", "HPP = 209 # Horizontal Pixel Padding # FTDI needs", "LCD drive strength: 0=5mA, 1=10mA LCD_DRIVE_10MA = 0 # Spread", "that recognize copyright laws, the author or authors # of", "1=10mA LCD_DRIVE_10MA = 0 # Spread Spectrum on RGB signals.", "Python example library for FTDI / BridgeTek # EVE graphic", "the timing # Active height of LCD display LCD_HEIGHT =", "DAMAGES OR # OTHER LIABILITY, WHETHER IN AN ACTION OF", "of horizontal sync pulse LCD_HSYNC1 = HFP+HSW # Start of", "= 5 #---------------------------------------------------------------------------- # Frame_Rate = 60Hz / 16.7mS #----------------------------------------------------------------------------", "# Anyone is free to copy, modify, publish, use, compile,", "LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A", "# In jurisdictions that recognize copyright laws, the author or", "Crystalfontz America Inc. # https:#www.crystalfontz.com/products/eve-accelerated-tft-displays.php #--------------------------------------------------------------------------- # # This is", "strength: 0=5mA, 1=10mA LCD_DRIVE_10MA = 0 # Spread Spectrum on", "WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND", "# Start of active line LCD_HOFFSET = HFP+HSW+HBP # Total", "= 60Hz / 16.7mS #---------------------------------------------------------------------------- # Horizontal timing # Target", "graphic accelerators. # #--------------------------------------------------------------------------- # # This file is part", "LCD_SWIZZLE = 2 # Define active edge of PCLK. Observed", "the EVE based on the timing # Active height of", "of the data. 
LCD_PCLKPOL = 0 # LCD drive strength:", "Start of active screen LCD_VOFFSET = VFP+VS+VBP # Total number", "of active line LCD_HOFFSET = HFP+HSW+HBP # Total number of", "TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR", "the EVE has to process each line. HPX = 240", "under copyright law. # THE SOFTWARE IS PROVIDED \"AS IS\",", "1 # Vertical Line Padding # FTDI needs at least", "THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE", "OTHER DEALINGS IN THE SOFTWARE. # For more information, please", "higher # PCLK frequencies. LCD_PCLK_CSPREAD = 0 #This is not", "that the EVE has to process each line. HPX =", "20 # Horizontal Back Porch HFP = 10 # Horizontal", "LCD_WIDTH = HPX # Start of horizontal sync pulse LCD_HSYNC0", "Vertical timing VLH = 400 # Vertical Line Height VS", "with rising edge of the clock. # Falling edge of", "Start of vertical sync pulse LCD_VSYNC0 = VFP # End", "= 0 # LCD drive strength: 0=5mA, 1=10mA LCD_DRIVE_10MA =", "VS = 2 # Vertical Sync (in lines) VBP =", "OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE", "0 # LCD drive strength: 0=5mA, 1=10mA LCD_DRIVE_10MA = 0", "# binary, for any purpose, commercial or non-commercial, and by", "= 0 # Pixel clock divisor LCD_PCLK = 5 #----------------------------------------------------------------------------", "overt act of # relinquishment in perpetuity of all present", "Data is put out coincident with falling edge of the", "source code form or as a compiled # binary, for", "the data. LCD_PCLKPOL = 0 # LCD drive strength: 0=5mA,", "is not a 24-bit display, so dither LCD_DITHER = 0", "of clocks per line LCD_HCYCLE = HPX+HFP+HSW+HBP+HPP #---------------------------------------------------------------------------- # Vertical", "horizontal sync pulse LCD_HSYNC1 = HFP+HSW # Start of active", "# EVE graphic accelerators. # #--------------------------------------------------------------------------- # # This file", "edge of PCLK. Observed by scope: # 0: Data is", "CONTRACT, TORT OR OTHERWISE, # ARISING FROM, OUT OF OR", "Porch HPP = 209 # Horizontal Pixel Padding # FTDI", "LCD_HCYCLE = HPX+HFP+HSW+HBP+HPP #---------------------------------------------------------------------------- # Vertical timing VLH = 400", "# of this software dedicate any and all copyright interest", "MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. # IN", "FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR", "Horizontal Pixel Width HSW = 10 # Horizontal Sync Width", "60Hz frame rate, using the largest possible line time in", "TOUCH_GOODIX_CAPACITIVE = False # Define RGB output pins order, determined", "ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE", "LCD_VOFFSET = VFP+VS+VBP # Total number of lines per screen", "Horizontal timing # Target 60Hz frame rate, using the largest", "Vertical Line Height VS = 2 # Vertical Sync (in", "EVE based on the timing # Active height of LCD", "0 # Spread Spectrum on RGB signals. Probably not a", "RGB signals. Probably not a good idea at higher #", "free and unencumbered software released into the public domain. #", "for FTDI / BridgeTek # EVE graphic accelerators. # #---------------------------------------------------------------------------", "screen LCD_VOFFSET = VFP+VS+VBP # Total number of lines per", "display, so dither LCD_DITHER = 0 # Pixel clock divisor", "America Inc. # https:#www.crystalfontz.com/products/eve-accelerated-tft-displays.php #--------------------------------------------------------------------------- # # This is free", "the data. 
# 1: Data is put out coincident with", "timing # Active width of LCD display LCD_WIDTH = HPX", "of LCD display LCD_HEIGHT = VLH # Start of vertical", "LCD display LCD_WIDTH = HPX # Start of horizontal sync", "sync pulse LCD_VSYNC0 = VFP # End of vertical sync", "FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. # IN NO EVENT", "line. HPX = 240 # Horizontal Pixel Width HSW =", "RGB output pins order, determined by PCB layout LCD_SWIZZLE =", "software to the public domain. We make this dedication for", "coincident with falling edge of the clock. # Rising edge", "# For more information, please refer to <http:#unlicense.org/> # #============================================================================", "falling edge of the clock. # Rising edge of the", "or non-commercial, and by any # means. # In jurisdictions", "24-bit display, so dither LCD_DITHER = 0 # Pixel clock", "FTDI / BridgeTek # EVE graphic accelerators. # #--------------------------------------------------------------------------- #", "the clock is in the middle of the data. #", "Define active edge of PCLK. Observed by scope: # 0:", "# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF", "based EVE libraries # to Python for Crystalfontz EVE based", "Crystalfontz Raspberry-Pi Python example library for FTDI / BridgeTek #", "for any purpose, commercial or non-commercial, and by any #", "LCD display LCD_HEIGHT = VLH # Start of vertical sync", "public domain. We make this dedication for the benefit #", "a 24-bit display, so dither LCD_DITHER = 0 # Pixel", "We make this dedication for the benefit # of the", "Data is put out coincident with rising edge of the", "LCD_HOFFSET = HFP+HSW+HBP # Total number of clocks per line", "A PARTICULAR PURPOSE AND NONINFRINGEMENT. # IN NO EVENT SHALL", "domain. # Anyone is free to copy, modify, publish, use,", "EVE Clock Speed EVE_CLOCK_SPEED = 60000000 # Touch TOUCH_RESISTIVE =", "this # software under copyright law. # THE SOFTWARE IS", "timing # Target 60Hz frame rate, using the largest possible", "PCB layout LCD_SWIZZLE = 2 # Define active edge of", "# EVE Clock Speed EVE_CLOCK_SPEED = 60000000 # Touch TOUCH_RESISTIVE", "EVE_CLOCK_SPEED = 60000000 # Touch TOUCH_RESISTIVE = False TOUCH_CAPACITIVE =", "time that the EVE has to process each line. HPX", "ACTION OF CONTRACT, TORT OR OTHERWISE, # ARISING FROM, OUT", "maximize the time that the EVE has to process each", "PCLK. Observed by scope: # 0: Data is put out", "of vertical sync pulse LCD_VSYNC0 = VFP # End of", "means. # In jurisdictions that recognize copyright laws, the author", "data. # 1: Data is put out coincident with rising", "commercial or non-commercial, and by any # means. # In", "pulse LCD_VSYNC1 = VFP+VS # Start of active screen LCD_VOFFSET", "EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES", "LCD_HEIGHT = VLH # Start of vertical sync pulse LCD_VSYNC0", "FOR ANY CLAIM, DAMAGES OR # OTHER LIABILITY, WHETHER IN", "(in lines) VBP = 2 # Vertical Back Porch VFP", "IS\", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED,", "LCD_PCLKPOL = 0 # LCD drive strength: 0=5mA, 1=10mA LCD_DRIVE_10MA", "Touch TOUCH_RESISTIVE = False TOUCH_CAPACITIVE = False TOUCH_GOODIX_CAPACITIVE = False", "HFP+HSW # Start of active line LCD_HOFFSET = HFP+HSW+HBP #", "by the EVE based on the timing # Active height", "0: Data is put out coincident with falling edge of", "SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR", "the time that the EVE has to process each line.", "Porch VFP = 4 # Vertical Front Porch VLP =", "in the middle of the data. 
# 1: Data is", "TORT OR OTHERWISE, # ARISING FROM, OUT OF OR IN", "#--------------------------------------------------------------------------- # # This is free and unencumbered software released", "EVE based displays. # # 2021-10-20 <NAME> / Crystalfontz America", "dedication to be an overt act of # relinquishment in", "# Falling edge of the clock is in the middle", "to be an overt act of # relinquishment in perpetuity", "not a 24-bit display, so dither LCD_DITHER = 0 #", "software, either in source code form or as a compiled", "# Horizontal timing # Target 60Hz frame rate, using the", "End of vertical sync pulse LCD_VSYNC1 = VFP+VS # Start", "heirs and # successors. We intend this dedication to be", "Horizontal Front Porch HPP = 209 # Horizontal Pixel Padding", "AN ACTION OF CONTRACT, TORT OR OTHERWISE, # ARISING FROM,", "# ARISING FROM, OUT OF OR IN CONNECTION WITH THE", "needed by the EVE based on the timing # Active", "vertical sync pulse LCD_VSYNC0 = VFP # End of vertical", "and unencumbered software released into the public domain. # Anyone", "of active screen LCD_VOFFSET = VFP+VS+VBP # Total number of", "60Hz / 16.7mS #---------------------------------------------------------------------------- # Horizontal timing # Target 60Hz", "needs at least 1 here # Define the constants needed", "and future rights to this # software under copyright law.", "for the benefit # of the public at large and", "In jurisdictions that recognize copyright laws, the author or authors", "display LCD_WIDTH = HPX # Start of horizontal sync pulse", "library for FTDI / BridgeTek # EVE graphic accelerators. #", "Target 60Hz frame rate, using the largest possible line time", "line LCD_HCYCLE = HPX+HFP+HSW+HBP+HPP #---------------------------------------------------------------------------- # Vertical timing VLH =", "is free and unencumbered software released into the public domain.", "# Spread Spectrum on RGB signals. Probably not a good", "the middle of the data. LCD_PCLKPOL = 0 # LCD", "edge of the clock is in the middle of the", "clock is in the middle of the data. LCD_PCLKPOL =", "existing C based EVE libraries # to Python for Crystalfontz", "Horizontal Sync Width HBP = 20 # Horizontal Back Porch", "# # 2021-10-20 <NAME> / Crystalfontz America Inc. # https:#www.crystalfontz.com/products/eve-accelerated-tft-displays.php", "PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, # EXPRESS", "data. LCD_PCLKPOL = 0 # LCD drive strength: 0=5mA, 1=10mA", "based displays. # # 2021-10-20 <NAME> / Crystalfontz America Inc.", "= 2 # Define active edge of PCLK. Observed by", "copy, modify, publish, use, compile, sell, or # distribute this", "Frame_Rate = 60Hz / 16.7mS #---------------------------------------------------------------------------- # Horizontal timing #", "= 811 # EVE Clock Speed EVE_CLOCK_SPEED = 60000000 #", "# Start of vertical sync pulse LCD_VSYNC0 = VFP #", "middle of the data. LCD_PCLKPOL = 0 # LCD drive", "IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF #", "OF CONTRACT, TORT OR OTHERWISE, # ARISING FROM, OUT OF", "constants needed by the EVE based on the timing #", "VFP # End of vertical sync pulse LCD_VSYNC1 = VFP+VS", "/ 16.7mS #---------------------------------------------------------------------------- # Horizontal timing # Target 60Hz frame", "# #============================================================================ #EVE Device Type EVE_DEVICE = 811 # EVE", "unencumbered software released into the public domain. # Anyone is", "<NAME> / Crystalfontz America Inc. 
# https:#www.crystalfontz.com/products/eve-accelerated-tft-displays.php #--------------------------------------------------------------------------- # #", "# Crystalfontz Raspberry-Pi Python example library for FTDI / BridgeTek", "of existing C based EVE libraries # to Python for", "to the detriment of our heirs and # successors. We", "Vertical Front Porch VLP = 1 # Vertical Line Padding", "all copyright interest in the # software to the public", "PARTICULAR PURPOSE AND NONINFRINGEMENT. # IN NO EVENT SHALL THE", "HFP = 10 # Horizontal Front Porch HPP = 209", "recognize copyright laws, the author or authors # of this", "Porch VLP = 1 # Vertical Line Padding # FTDI", "ANY CLAIM, DAMAGES OR # OTHER LIABILITY, WHETHER IN AN", "SOFTWARE. # For more information, please refer to <http:#unlicense.org/> #", "SOFTWARE OR THE USE OR # OTHER DEALINGS IN THE", "#---------------------------------------------------------------------------- # Frame_Rate = 60Hz / 16.7mS #---------------------------------------------------------------------------- # Horizontal", "= 60000000 # Touch TOUCH_RESISTIVE = False TOUCH_CAPACITIVE = False", "<http:#unlicense.org/> # #============================================================================ #EVE Device Type EVE_DEVICE = 811 #", "clock is in the middle of the data. # 1:", "Define the constants needed by the EVE based on the", "1: Data is put out coincident with rising edge of", "of the public at large and to the detriment of", "# Horizontal Pixel Padding # FTDI needs at least 1", "# distribute this software, either in source code form or", "Width HSW = 10 # Horizontal Sync Width HBP =", "Front Porch VLP = 1 # Vertical Line Padding #", "# Start of active screen LCD_VOFFSET = VFP+VS+VBP # Total", "active screen LCD_VOFFSET = VFP+VS+VBP # Total number of lines", "libraries # to Python for Crystalfontz EVE based displays. #", "= HFP+HSW+HBP # Total number of clocks per line LCD_HCYCLE", "Vertical Back Porch VFP = 4 # Vertical Front Porch", "EVE libraries # to Python for Crystalfontz EVE based displays.", "in order to # maximize the time that the EVE", "display LCD_HEIGHT = VLH # Start of vertical sync pulse", "= False # Define RGB output pins order, determined by", "large and to the detriment of our heirs and #", "# Frame_Rate = 60Hz / 16.7mS #---------------------------------------------------------------------------- # Horizontal timing", "height of LCD display LCD_HEIGHT = VLH # Start of", "benefit # of the public at large and to the", "Rising edge of the clock is in the middle of", "port/adaptation of existing C based EVE libraries # to Python", "Vertical Sync (in lines) VBP = 2 # Vertical Back", "# Vertical Front Porch VLP = 1 # Vertical Line", "= 10 # Horizontal Front Porch HPP = 209 #", "Anyone is free to copy, modify, publish, use, compile, sell,", "# End of horizontal sync pulse LCD_HSYNC1 = HFP+HSW #", "= VLH # Start of vertical sync pulse LCD_VSYNC0 =", "#============================================================================ #EVE Device Type EVE_DEVICE = 811 # EVE Clock", "209 # Horizontal Pixel Padding # FTDI needs at least", "PURPOSE AND NONINFRINGEMENT. # IN NO EVENT SHALL THE AUTHORS", "a compiled # binary, for any purpose, commercial or non-commercial,", "in the middle of the data. 
LCD_PCLKPOL = 0 #", "Horizontal Pixel Padding # FTDI needs at least 1 here", "IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY", "= HPX+HFP+HSW+HBP+HPP #---------------------------------------------------------------------------- # Vertical timing VLH = 400 #", "authors # of this software dedicate any and all copyright", "THE SOFTWARE. # For more information, please refer to <http:#unlicense.org/>", "FTDI needs at least 1 here # Define the constants", "this dedication to be an overt act of # relinquishment", "OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT", "# of the public at large and to the detriment", "NONINFRINGEMENT. # IN NO EVENT SHALL THE AUTHORS BE LIABLE", "released into the public domain. # Anyone is free to", "OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR", "10 # Horizontal Sync Width HBP = 20 # Horizontal", "out coincident with falling edge of the clock. # Rising", "WITH THE SOFTWARE OR THE USE OR # OTHER DEALINGS", "5 #---------------------------------------------------------------------------- # Frame_Rate = 60Hz / 16.7mS #---------------------------------------------------------------------------- #", "clock. # Falling edge of the clock is in the", "edge of the clock. # Rising edge of the clock", "sync pulse LCD_HSYNC0 = HFP # End of horizontal sync", "= 400 # Vertical Line Height VS = 2 #", "to copy, modify, publish, use, compile, sell, or # distribute", "VFP = 4 # Vertical Front Porch VLP = 1", "End of horizontal sync pulse LCD_HSYNC1 = HFP+HSW # Start", "<reponame>crystalfontz/CFA-EVE-Python-Library<gh_stars>1-10 #=========================================================================== # # Crystalfontz Raspberry-Pi Python example library for", "This file is part of the port/adaptation of existing C", "# Vertical Line Padding # FTDI needs at least 1", "HPX = 240 # Horizontal Pixel Width HSW = 10", "EVE has to process each line. HPX = 240 #", "= 0 #This is not a 24-bit display, so dither", "EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES", "example library for FTDI / BridgeTek # EVE graphic accelerators.", "Front Porch HPP = 209 # Horizontal Pixel Padding #", "= VFP+VS+VBP # Total number of lines per screen LCD_VCYCLE", "dedication for the benefit # of the public at large", "= 1 # Vertical Line Padding # FTDI needs at", "sell, or # distribute this software, either in source code", "# Horizontal Sync Width HBP = 20 # Horizontal Back", "of all present and future rights to this # software", "Crystalfontz EVE based displays. # # 2021-10-20 <NAME> / Crystalfontz", "Spectrum on RGB signals. Probably not a good idea at", "OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF", "a good idea at higher # PCLK frequencies. LCD_PCLK_CSPREAD =", "# Define the constants needed by the EVE based on", "laws, the author or authors # of this software dedicate", "TOUCH_CAPACITIVE = False TOUCH_GOODIX_CAPACITIVE = False # Define RGB output", "2 # Define active edge of PCLK. Observed by scope:", "OR THE USE OR # OTHER DEALINGS IN THE SOFTWARE.", "Observed by scope: # 0: Data is put out coincident", "0 #This is not a 24-bit display, so dither LCD_DITHER", "OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR", "horizontal sync pulse LCD_HSYNC0 = HFP # End of horizontal", "signals. Probably not a good idea at higher # PCLK", "VFP+VS+VBP # Total number of lines per screen LCD_VCYCLE =", "WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING", "edge of the clock. 
# Falling edge of the clock", "= 240 # Horizontal Pixel Width HSW = 10 #", "good idea at higher # PCLK frequencies. LCD_PCLK_CSPREAD = 0", "rights to this # software under copyright law. # THE", "based on the timing # Active height of LCD display", "Start of active line LCD_HOFFSET = HFP+HSW+HBP # Total number", "not a good idea at higher # PCLK frequencies. LCD_PCLK_CSPREAD", "the clock. # Rising edge of the clock is in", "the middle of the data. # 1: Data is put", "this dedication for the benefit # of the public at", "rising edge of the clock. # Falling edge of the", "# means. # In jurisdictions that recognize copyright laws, the", "Probably not a good idea at higher # PCLK frequencies.", "HBP = 20 # Horizontal Back Porch HFP = 10", "put out coincident with falling edge of the clock. #", "# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. #", "either in source code form or as a compiled #", "in perpetuity of all present and future rights to this", "# OTHER DEALINGS IN THE SOFTWARE. # For more information,", "LCD_VSYNC0 = VFP # End of vertical sync pulse LCD_VSYNC1", "#---------------------------------------------------------------------------- # Horizontal timing # Target 60Hz frame rate, using", "# # Crystalfontz Raspberry-Pi Python example library for FTDI /", "= HFP # End of horizontal sync pulse LCD_HSYNC1 =", "file is part of the port/adaptation of existing C based", "EVE graphic accelerators. # #--------------------------------------------------------------------------- # # This file is", "Horizontal Back Porch HFP = 10 # Horizontal Front Porch", "at large and to the detriment of our heirs and", "= False TOUCH_CAPACITIVE = False TOUCH_GOODIX_CAPACITIVE = False # Define", "Back Porch HFP = 10 # Horizontal Front Porch HPP", "of PCLK. Observed by scope: # 0: Data is put", "THE SOFTWARE OR THE USE OR # OTHER DEALINGS IN", "use, compile, sell, or # distribute this software, either in", "DEALINGS IN THE SOFTWARE. # For more information, please refer", "# Define active edge of PCLK. Observed by scope: #", "active edge of PCLK. Observed by scope: # 0: Data", "purpose, commercial or non-commercial, and by any # means. #", "by the EVE based on the timing # Active width", "WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT", "= 4 # Vertical Front Porch VLP = 1 #", "of the port/adaptation of existing C based EVE libraries #", "process each line. HPX = 240 # Horizontal Pixel Width", "time in order to # maximize the time that the", "#---------------------------------------------------------------------------- # Vertical timing VLH = 400 # Vertical Line", "/ Crystalfontz America Inc. # https:#www.crystalfontz.com/products/eve-accelerated-tft-displays.php #--------------------------------------------------------------------------- # # This", "of the clock is in the middle of the data.", "middle of the data. # 1: Data is put out", "Line Padding # FTDI needs at least 1 here #", "is in the middle of the data. LCD_PCLKPOL = 0", "compile, sell, or # distribute this software, either in source", "IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, #", "Spread Spectrum on RGB signals. Probably not a good idea", "# relinquishment in perpetuity of all present and future rights", "form or as a compiled # binary, for any purpose,", "and # successors. 
We intend this dedication to be an", "60000000 # Touch TOUCH_RESISTIVE = False TOUCH_CAPACITIVE = False TOUCH_GOODIX_CAPACITIVE", "# Vertical Line Height VS = 2 # Vertical Sync", "Sync Width HBP = 20 # Horizontal Back Porch HFP", "= False TOUCH_GOODIX_CAPACITIVE = False # Define RGB output pins", "has to process each line. HPX = 240 # Horizontal", "# Active height of LCD display LCD_HEIGHT = VLH #", "NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM,", "dither LCD_DITHER = 0 # Pixel clock divisor LCD_PCLK =", "NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR", "out coincident with rising edge of the clock. # Falling", "displays. # # 2021-10-20 <NAME> / Crystalfontz America Inc. #", "relinquishment in perpetuity of all present and future rights to", "THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY", "by scope: # 0: Data is put out coincident with", "IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, # ARISING", "= 0 # Spread Spectrum on RGB signals. Probably not", "divisor LCD_PCLK = 5 #---------------------------------------------------------------------------- # Frame_Rate = 60Hz /", "LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,", "is put out coincident with rising edge of the clock.", "# LCD drive strength: 0=5mA, 1=10mA LCD_DRIVE_10MA = 0 #", "BridgeTek # EVE graphic accelerators. # #--------------------------------------------------------------------------- # # This", "Width HBP = 20 # Horizontal Back Porch HFP =", "copyright laws, the author or authors # of this software", "of this software dedicate any and all copyright interest in", "\"AS IS\", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR", "THE USE OR # OTHER DEALINGS IN THE SOFTWARE. #", "For more information, please refer to <http:#unlicense.org/> # #============================================================================ #EVE", "LCD_HSYNC1 = HFP+HSW # Start of active line LCD_HOFFSET =", "= 2 # Vertical Sync (in lines) VBP = 2", "SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,", "TOUCH_RESISTIVE = False TOUCH_CAPACITIVE = False TOUCH_GOODIX_CAPACITIVE = False #", "based on the timing # Active width of LCD display", "the # software to the public domain. We make this", "16.7mS #---------------------------------------------------------------------------- # Horizontal timing # Target 60Hz frame rate,", "clock. # Rising edge of the clock is in the", "author or authors # of this software dedicate any and", "# Define RGB output pins order, determined by PCB layout", "non-commercial, and by any # means. # In jurisdictions that", "publish, use, compile, sell, or # distribute this software, either", "Porch HFP = 10 # Horizontal Front Porch HPP =", "active line LCD_HOFFSET = HFP+HSW+HBP # Total number of clocks", "False TOUCH_GOODIX_CAPACITIVE = False # Define RGB output pins order,", "= HFP+HSW # Start of active line LCD_HOFFSET = HFP+HSW+HBP", "# # This file is part of the port/adaptation of", "coincident with rising edge of the clock. # Falling edge", "# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT", "here # Define the constants needed by the EVE based", "LCD_VSYNC1 = VFP+VS # Start of active screen LCD_VOFFSET =", "information, please refer to <http:#unlicense.org/> # #============================================================================ #EVE Device Type", "# software to the public domain. 
We make this dedication", "rate, using the largest possible line time in order to", "Total number of clocks per line LCD_HCYCLE = HPX+HFP+HSW+HBP+HPP #----------------------------------------------------------------------------", "order, determined by PCB layout LCD_SWIZZLE = 2 # Define", "# to Python for Crystalfontz EVE based displays. # #", "to this # software under copyright law. # THE SOFTWARE", "= VFP # End of vertical sync pulse LCD_VSYNC1 =", "# Vertical Back Porch VFP = 4 # Vertical Front", "refer to <http:#unlicense.org/> # #============================================================================ #EVE Device Type EVE_DEVICE =", "accelerators. # #--------------------------------------------------------------------------- # # This file is part of", "act of # relinquishment in perpetuity of all present and", "software released into the public domain. # Anyone is free", "CLAIM, DAMAGES OR # OTHER LIABILITY, WHETHER IN AN ACTION", "public at large and to the detriment of our heirs", "per line LCD_HCYCLE = HPX+HFP+HSW+HBP+HPP #---------------------------------------------------------------------------- # Vertical timing VLH", "order to # maximize the time that the EVE has", "jurisdictions that recognize copyright laws, the author or authors #", "OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.", "at least 1 here # Define the constants needed by", "is put out coincident with falling edge of the clock.", "make this dedication for the benefit # of the public", "idea at higher # PCLK frequencies. LCD_PCLK_CSPREAD = 0 #This", "# successors. We intend this dedication to be an overt", "False # Define RGB output pins order, determined by PCB", "please refer to <http:#unlicense.org/> # #============================================================================ #EVE Device Type EVE_DEVICE", "Start of horizontal sync pulse LCD_HSYNC0 = HFP # End", "#=========================================================================== # # Crystalfontz Raspberry-Pi Python example library for FTDI", "with falling edge of the clock. # Rising edge of", "# End of vertical sync pulse LCD_VSYNC1 = VFP+VS #", "# This file is part of the port/adaptation of existing", "or # distribute this software, either in source code form", "an overt act of # relinquishment in perpetuity of all", "to Python for Crystalfontz EVE based displays. # # 2021-10-20", "#EVE Device Type EVE_DEVICE = 811 # EVE Clock Speed", "intend this dedication to be an overt act of #", "C based EVE libraries # to Python for Crystalfontz EVE", "Type EVE_DEVICE = 811 # EVE Clock Speed EVE_CLOCK_SPEED =", "to process each line. HPX = 240 # Horizontal Pixel", "put out coincident with rising edge of the clock. #", "Clock Speed EVE_CLOCK_SPEED = 60000000 # Touch TOUCH_RESISTIVE = False", "each line. HPX = 240 # Horizontal Pixel Width HSW", "more information, please refer to <http:#unlicense.org/> # #============================================================================ #EVE Device", "dedicate any and all copyright interest in the # software", "and to the detriment of our heirs and # successors.", "Pixel Width HSW = 10 # Horizontal Sync Width HBP", "OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE", "layout LCD_SWIZZLE = 2 # Define active edge of PCLK.", "frequencies. LCD_PCLK_CSPREAD = 0 #This is not a 24-bit display,", "WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, #", "interest in the # software to the public domain. 
We", "# Start of horizontal sync pulse LCD_HSYNC0 = HFP #", "in source code form or as a compiled # binary,", "pulse LCD_VSYNC0 = VFP # End of vertical sync pulse", "/ BridgeTek # EVE graphic accelerators. # #--------------------------------------------------------------------------- # #", "the timing # Active width of LCD display LCD_WIDTH =", "present and future rights to this # software under copyright", "BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS", "of the data. # 1: Data is put out coincident", "# 1: Data is put out coincident with rising edge", "= HPX # Start of horizontal sync pulse LCD_HSYNC0 =", "= VFP+VS # Start of active screen LCD_VOFFSET = VFP+VS+VBP", "IN THE SOFTWARE. # For more information, please refer to", "of LCD display LCD_WIDTH = HPX # Start of horizontal", "#--------------------------------------------------------------------------- # # This file is part of the port/adaptation", "OR # OTHER DEALINGS IN THE SOFTWARE. # For more", "10 # Horizontal Front Porch HPP = 209 # Horizontal", "# Horizontal Front Porch HPP = 209 # Horizontal Pixel", "# Vertical Sync (in lines) VBP = 2 # Vertical", "LCD_PCLK = 5 #---------------------------------------------------------------------------- # Frame_Rate = 60Hz / 16.7mS", "sync pulse LCD_VSYNC1 = VFP+VS # Start of active screen", "# 2021-10-20 <NAME> / Crystalfontz America Inc. # https:#www.crystalfontz.com/products/eve-accelerated-tft-displays.php #---------------------------------------------------------------------------", "any # means. # In jurisdictions that recognize copyright laws,", "part of the port/adaptation of existing C based EVE libraries", "determined by PCB layout LCD_SWIZZLE = 2 # Define active", "# Total number of clocks per line LCD_HCYCLE = HPX+HFP+HSW+HBP+HPP", "output pins order, determined by PCB layout LCD_SWIZZLE = 2", "Raspberry-Pi Python example library for FTDI / BridgeTek # EVE", "# #--------------------------------------------------------------------------- # # This file is part of the", "the clock is in the middle of the data. LCD_PCLKPOL", "LCD_DITHER = 0 # Pixel clock divisor LCD_PCLK = 5", "# Horizontal Pixel Width HSW = 10 # Horizontal Sync", "# 0: Data is put out coincident with falling edge", "detriment of our heirs and # successors. We intend this", "Device Type EVE_DEVICE = 811 # EVE Clock Speed EVE_CLOCK_SPEED", "FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. # IN NO", "IN CONNECTION WITH THE SOFTWARE OR THE USE OR #", "or as a compiled # binary, for any purpose, commercial", "successors. We intend this dedication to be an overt act", "Height VS = 2 # Vertical Sync (in lines) VBP", "width of LCD display LCD_WIDTH = HPX # Start of", "LCD_HSYNC0 = HFP # End of horizontal sync pulse LCD_HSYNC1", "# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE", "Python for Crystalfontz EVE based displays. # # 2021-10-20 <NAME>", "of horizontal sync pulse LCD_HSYNC0 = HFP # End of", "VLH = 400 # Vertical Line Height VS = 2", "as a compiled # binary, for any purpose, commercial or", "# PCLK frequencies. LCD_PCLK_CSPREAD = 0 #This is not a", "We intend this dedication to be an overt act of", "pulse LCD_HSYNC0 = HFP # End of horizontal sync pulse", "is in the middle of the data. 
# 1: Data", "is part of the port/adaptation of existing C based EVE", "# maximize the time that the EVE has to process", "# Rising edge of the clock is in the middle", "clocks per line LCD_HCYCLE = HPX+HFP+HSW+HBP+HPP #---------------------------------------------------------------------------- # Vertical timing", "# FTDI needs at least 1 here # Define the", "the constants needed by the EVE based on the timing", "into the public domain. # Anyone is free to copy,", "modify, publish, use, compile, sell, or # distribute this software,", "binary, for any purpose, commercial or non-commercial, and by any", "Pixel Padding # FTDI needs at least 1 here #", "the public domain. # Anyone is free to copy, modify,", "AND NONINFRINGEMENT. # IN NO EVENT SHALL THE AUTHORS BE", "https:#www.crystalfontz.com/products/eve-accelerated-tft-displays.php #--------------------------------------------------------------------------- # # This is free and unencumbered software", "this software, either in source code form or as a", "811 # EVE Clock Speed EVE_CLOCK_SPEED = 60000000 # Touch", "the benefit # of the public at large and to", "0 # Pixel clock divisor LCD_PCLK = 5 #---------------------------------------------------------------------------- #", "# Horizontal Back Porch HFP = 10 # Horizontal Front", "HFP+HSW+HBP # Total number of clocks per line LCD_HCYCLE =", "USE OR # OTHER DEALINGS IN THE SOFTWARE. # For", "to # maximize the time that the EVE has to", "for Crystalfontz EVE based displays. # # 2021-10-20 <NAME> /", "pins order, determined by PCB layout LCD_SWIZZLE = 2 #", "lines) VBP = 2 # Vertical Back Porch VFP =", "in the # software to the public domain. We make", "EVE_DEVICE = 811 # EVE Clock Speed EVE_CLOCK_SPEED = 60000000", "# https:#www.crystalfontz.com/products/eve-accelerated-tft-displays.php #--------------------------------------------------------------------------- # # This is free and unencumbered", "AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR # OTHER", "to <http:#unlicense.org/> # #============================================================================ #EVE Device Type EVE_DEVICE = 811", "Line Height VS = 2 # Vertical Sync (in lines)", "LIABLE FOR ANY CLAIM, DAMAGES OR # OTHER LIABILITY, WHETHER", "# Active width of LCD display LCD_WIDTH = HPX #", "of vertical sync pulse LCD_VSYNC1 = VFP+VS # Start of", "2 # Vertical Sync (in lines) VBP = 2 #", "Falling edge of the clock is in the middle of", "# This is free and unencumbered software released into the", "law. # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY", "HPX+HFP+HSW+HBP+HPP #---------------------------------------------------------------------------- # Vertical timing VLH = 400 # Vertical", "on the timing # Active height of LCD display LCD_HEIGHT", "OTHERWISE, # ARISING FROM, OUT OF OR IN CONNECTION WITH", "BE LIABLE FOR ANY CLAIM, DAMAGES OR # OTHER LIABILITY,", "#This is not a 24-bit display, so dither LCD_DITHER =", "any purpose, commercial or non-commercial, and by any # means.", "# Touch TOUCH_RESISTIVE = False TOUCH_CAPACITIVE = False TOUCH_GOODIX_CAPACITIVE =", "Sync (in lines) VBP = 2 # Vertical Back Porch", "the largest possible line time in order to # maximize", "the clock. 
# Falling edge of the clock is in", "LCD_PCLK_CSPREAD = 0 #This is not a 24-bit display, so", "Define RGB output pins order, determined by PCB layout LCD_SWIZZLE", "and all copyright interest in the # software to the", "drive strength: 0=5mA, 1=10mA LCD_DRIVE_10MA = 0 # Spread Spectrum", "VBP = 2 # Vertical Back Porch VFP = 4", "future rights to this # software under copyright law. #", "of # relinquishment in perpetuity of all present and future", "code form or as a compiled # binary, for any", "# # This is free and unencumbered software released into", "line time in order to # maximize the time that", "software under copyright law. # THE SOFTWARE IS PROVIDED \"AS", "# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR", "timing VLH = 400 # Vertical Line Height VS =", "# software under copyright law. # THE SOFTWARE IS PROVIDED", "Pixel clock divisor LCD_PCLK = 5 #---------------------------------------------------------------------------- # Frame_Rate =", "free to copy, modify, publish, use, compile, sell, or #", "clock divisor LCD_PCLK = 5 #---------------------------------------------------------------------------- # Frame_Rate = 60Hz", "on RGB signals. Probably not a good idea at higher", "public domain. # Anyone is free to copy, modify, publish,", "vertical sync pulse LCD_VSYNC1 = VFP+VS # Start of active", "by any # means. # In jurisdictions that recognize copyright", "scope: # 0: Data is put out coincident with falling", "our heirs and # successors. We intend this dedication to", "VLH # Start of vertical sync pulse LCD_VSYNC0 = VFP", "Inc. # https:#www.crystalfontz.com/products/eve-accelerated-tft-displays.php #--------------------------------------------------------------------------- # # This is free and", "False TOUCH_CAPACITIVE = False TOUCH_GOODIX_CAPACITIVE = False # Define RGB", "VFP+VS # Start of active screen LCD_VOFFSET = VFP+VS+VBP #", "compiled # binary, for any purpose, commercial or non-commercial, and", "= 10 # Horizontal Sync Width HBP = 20 #", "# Total number of lines per screen LCD_VCYCLE = VLH+VFP+VS+VBP+VLP", "2021-10-20 <NAME> / Crystalfontz America Inc. # https:#www.crystalfontz.com/products/eve-accelerated-tft-displays.php #--------------------------------------------------------------------------- #", "# Target 60Hz frame rate, using the largest possible line", "of our heirs and # successors. We intend this dedication", "the port/adaptation of existing C based EVE libraries # to", "line LCD_HOFFSET = HFP+HSW+HBP # Total number of clocks per", "is free to copy, modify, publish, use, compile, sell, or", "of the clock. # Falling edge of the clock is", "the author or authors # of this software dedicate any", "2 # Vertical Back Porch VFP = 4 # Vertical", "so dither LCD_DITHER = 0 # Pixel clock divisor LCD_PCLK", "at higher # PCLK frequencies. LCD_PCLK_CSPREAD = 0 #This is", "EVE based on the timing # Active width of LCD", "This is free and unencumbered software released into the public", "ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED", "4 # Vertical Front Porch VLP = 1 # Vertical", "the public domain. We make this dedication for the benefit", "the EVE based on the timing # Active width of", "the public at large and to the detriment of our", "frame rate, using the largest possible line time in order", "INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY,", "240 # Horizontal Pixel Width HSW = 10 # Horizontal", "sync pulse LCD_HSYNC1 = HFP+HSW # Start of active line", "THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR #", "domain. 
We make this dedication for the benefit # of", "Active width of LCD display LCD_WIDTH = HPX # Start", "this software dedicate any and all copyright interest in the", "LCD_DRIVE_10MA = 0 # Spread Spectrum on RGB signals. Probably", "CONNECTION WITH THE SOFTWARE OR THE USE OR # OTHER", "by PCB layout LCD_SWIZZLE = 2 # Define active edge", "or authors # of this software dedicate any and all", "be an overt act of # relinquishment in perpetuity of", "0=5mA, 1=10mA LCD_DRIVE_10MA = 0 # Spread Spectrum on RGB", "perpetuity of all present and future rights to this #", "timing # Active height of LCD display LCD_HEIGHT = VLH", "OR # OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,", "software dedicate any and all copyright interest in the #", "OR OTHERWISE, # ARISING FROM, OUT OF OR IN CONNECTION", "HFP # End of horizontal sync pulse LCD_HSYNC1 = HFP+HSW", "largest possible line time in order to # maximize the", "= 209 # Horizontal Pixel Padding # FTDI needs at", "the detriment of our heirs and # successors. We intend", "HSW = 10 # Horizontal Sync Width HBP = 20", "of the clock. # Rising edge of the clock is" ]
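#----------------------------------------------------------------------------
# A quick sanity check of the "Frame_Rate = 60Hz / 16.7mS" target stated
# above. This is a sketch, not part of the original driver: it assumes the
# pixel clock is EVE_CLOCK_SPEED / LCD_PCLK (as the "Pixel clock divisor"
# comment suggests) and uses only the constants defined in this file.
#   clocks per line = LCD_HCYCLE = 240+10+10+20+209 = 489
#   lines per frame = LCD_VCYCLE = 400+4+2+2+1     = 409
#   pixel clock     = 60MHz / 5                    = 12MHz
#   frame rate      = 12e6 / (489 * 409)           = ~60.0Hz (~16.7mS/frame)
if __name__ == "__main__":
    pclk_hz = EVE_CLOCK_SPEED / LCD_PCLK
    frame_hz = pclk_hz / (LCD_HCYCLE * LCD_VCYCLE)
    print(f"PCLK = {pclk_hz/1e6:.1f} MHz, frame rate = {frame_hz:.2f} Hz")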
import itertools
import signal
from copy import deepcopy
from typing import Union, Callable
import numpy as np
import quapy as qp
from quapy.data.base import LabelledCollection
from quapy.evaluation import artificial_prevalence_prediction, natural_prevalence_prediction, gen_prevalence_prediction
from quapy.method.aggregative import BaseQuantifier
import inspect
from util import _check_sample_size


class GridSearchQ(BaseQuantifier):
    """Grid Search optimization targeting a quantification-oriented metric.

    Optimizes the hyperparameters of a quantification method, based on an evaluation method and on an evaluation
    protocol for quantification.

    :param model: the quantifier to optimize
    :type model: BaseQuantifier
    :param param_grid: a dictionary with keys the parameter names and values the list of values to explore
    :param sample_size: the size of the samples to extract from the validation set (ignored if protocol='gen')
    :param protocol: either 'app' for the artificial prevalence protocol, 'npp' for the natural prevalence
        protocol, or 'gen' for using a custom sampling generator function
    :param n_prevpoints: if specified, indicates the number of equally distant points to extract from the interval
        [0,1] in order to define the prevalences of the samples; e.g., if n_prevpoints=5, then the prevalences for
        each class will be explored in [0.00, 0.25, 0.50, 0.75, 1.00]. If not specified, then eval_budget is
        requested. Ignored if protocol!='app'.
    :param n_repetitions: the number of repetitions for each combination of prevalences. This parameter is ignored
        for the protocol='app' if eval_budget is set and is lower than the number of combinations that would be
        generated using the value assigned to n_prevpoints (for the current number of classes and n_repetitions).
        Ignored for protocol='npp' and protocol='gen' (use eval_budget for setting a maximum number of samples in
        those cases).
    :param eval_budget: if specified, sets a ceiling on the number of evaluations to perform for each hyper-parameter
        combination. For example, if protocol='app', there are 3 classes, n_repetitions=1 and eval_budget=20, then
        n_prevpoints will be set to 5, since this will generate 15 different prevalences, i.e., [0, 0, 1],
        [0, 0.25, 0.75], [0, 0.5, 0.5] ... [1, 0, 0], and since setting it to 6 would generate more than 20.
        When protocol='gen', indicates the maximum number of samples to generate, but fewer samples will be
        generated if the generator yields fewer samples.
    :param error: an error function (callable) or a string indicating the name of an error function (valid ones
        are those in qp.error.QUANTIFICATION_ERROR)
    :param refit: whether or not to refit the model on the whole labelled collection (training+validation) with
        the best chosen hyperparameter combination. Ignored if protocol='gen'
    :param val_split: either a LabelledCollection on which to test the performance of the different settings, or
        a float in [0,1] indicating the proportion of labelled data to extract from the training set, or a callable
        that returns a generator function each time it is invoked (only for protocol='gen').
    :param n_jobs: number of parallel jobs
    :param random_seed: set the seed of the random generator to replicate experiments. Ignored if protocol='gen'.
    :param timeout: establishes a timer (in seconds) for each of the hyperparameters configurations being tested.
        Whenever a run takes longer than this timer, that configuration will be ignored. If all configurations end
        up being ignored, a TimeoutError exception is raised. If -1 (default) then no time bound is set.
    :param verbose: set to True to get information through the stdout
    """

    def __init__(self,
                 model: BaseQuantifier,
                 param_grid: dict,
                 sample_size: Union[int, None] = None,
                 protocol='app',
                 n_prevpoints: int = None,
                 n_repetitions: int = 1,
                 eval_budget: int = None,
                 error: Union[Callable, str] = qp.error.mae,
                 refit=True,
                 val_split=0.4,
                 n_jobs=1,
                 random_seed=42,
                 timeout=-1,
                 verbose=False):
        self.model = model
        self.param_grid = param_grid
        self.sample_size = sample_size
        self.protocol = protocol.lower()
        self.n_prevpoints = n_prevpoints
        self.n_repetitions = n_repetitions
        self.eval_budget = eval_budget
        self.refit = refit
        self.val_split = val_split
        self.n_jobs = n_jobs
        self.random_seed = random_seed
        self.timeout = timeout
        self.verbose = verbose
        self.__check_error(error)
        assert self.protocol in {'app', 'npp', 'gen'}, \
            'unknown protocol: valid ones are "app" or "npp" for the "artificial" or the "natural" prevalence ' \
            'protocols. Use protocol="gen" when passing a generator function through val_split that yields a ' \
            'sample (instances) and their prevalence (ndarray) at each iteration.'
        assert self.eval_budget is None or isinstance(self.eval_budget, int)
        if self.protocol in ['npp', 'gen']:
            if self.protocol == 'npp' and (self.eval_budget is None or self.eval_budget <= 0):
                raise ValueError(f'when protocol="npp" the parameter eval_budget should be '
                                 f'indicated (and should be >0).')
            if self.n_repetitions != 1:
                print('[warning] n_repetitions has been set and will be ignored for the selected protocol')

    def _sout(self, msg):
        if self.verbose:
            print(f'[{self.__class__.__name__}]: {msg}')

    def __check_training_validation(self, training, validation):
        if isinstance(validation, LabelledCollection):
            return training, validation
        elif isinstance(validation, float):
            assert 0. < validation < 1., 'validation proportion should be in (0,1)'
            training, validation = training.split_stratified(train_prop=1 - validation, random_state=self.random_seed)
            return training, validation
        elif self.protocol == 'gen' and inspect.isgenerator(validation()):
            return training, validation
        else:
            raise ValueError(f'"validation" must either be a LabelledCollection or a float in (0,1) indicating the '
                             f'proportion of training documents to extract (type found: {type(validation)}). '
                             f'Optionally, "validation" can be a callable function returning a generator that yields '
                             f'the sample instances along with their true prevalence at each iteration by '
                             f'setting protocol="gen".')

    def __check_error(self, error):
        if error in qp.error.QUANTIFICATION_ERROR:
            self.error = error
        elif isinstance(error, str):
            self.error = qp.error.from_name(error)
        elif hasattr(error, '__call__'):
            self.error = error
        else:
            raise ValueError(f'unexpected error type; must either be a callable function or a str representing\n'
                             f'the name of an error function in {qp.error.QUANTIFICATION_ERROR_NAMES}')

    def __generate_predictions(self, model, val_split):
        commons = {
            'n_repetitions': self.n_repetitions,
            'n_jobs': self.n_jobs,
            'random_seed': self.random_seed,
            'verbose': False
        }
        if self.protocol == 'app':
            return artificial_prevalence_prediction(
                model, val_split, self.sample_size,
                n_prevpoints=self.n_prevpoints,
                eval_budget=self.eval_budget,
                **commons
            )
        elif self.protocol == 'npp':
            return natural_prevalence_prediction(
                model, val_split, self.sample_size,
                **commons)
        elif self.protocol == 'gen':
            return gen_prevalence_prediction(model, gen_fn=val_split, eval_budget=self.eval_budget)
        else:
            raise ValueError('unknown protocol')

    def fit(self, training: LabelledCollection, val_split: Union[LabelledCollection, float, Callable] = None):
        """Learning routine. Fits the model with all combinations of hyperparameters and selects the one minimizing
        the error metric.

        :param training: the training set on which to optimize the hyperparameters
        :param val_split: either a LabelledCollection on which to test the performance of the different settings, or
            a float in [0,1] indicating the proportion of labelled data to extract from the training set
        :return: self
        """
        if val_split is None:
            val_split = self.val_split
        training, val_split = self.__check_training_validation(training, val_split)
        if self.protocol != 'gen':
            self.sample_size = _check_sample_size(self.sample_size)

        params_keys = list(self.param_grid.keys())
        params_values = list(self.param_grid.values())

        model = self.model

        if self.timeout > 0:
            def handler(signum, frame):
                self._sout('timeout reached')
                raise TimeoutError()

            signal.signal(signal.SIGALRM, handler)

        self.param_scores_ = {}
        self.best_score_ = None
        some_timeouts = False
        for values in itertools.product(*params_values):
            params = dict({k: values[i] for i, k in enumerate(params_keys)})

            if self.timeout > 0:
                signal.alarm(self.timeout)
            try:
                # overrides default parameters with the parameters being explored at this iteration
                model.set_params(**params)
                model.fit(training)
                true_prevalences, estim_prevalences = self.__generate_predictions(model, val_split)
                score = self.error(true_prevalences, estim_prevalences)

                self._sout(f'checking hyperparams={params} got {self.error.__name__} score {score:.5f}')
                if self.best_score_ is None or score < self.best_score_:
                    self.best_score_ = score
                    self.best_params_ = params
                    self.best_model_ = deepcopy(model)
                self.param_scores_[str(params)] = score

                if self.timeout > 0:
                    signal.alarm(0)
            except TimeoutError:
                print(f'timeout reached for config {params}')
                some_timeouts = True

        if self.best_score_ is None and some_timeouts:
            raise TimeoutError('all jobs took more than the timeout time to end')

        self._sout(f'optimization finished: best params {self.best_params_} (score={self.best_score_:.5f})')

        if self.refit:
            self._sout(f'refitting on the whole development set')
            self.best_model_.fit(training + val_split)

        return self

    def quantify(self, instances):
        """Estimate class prevalence values using the best model found after calling the :meth:`fit` method.

        :param instances: sample containing the instances
        :return: a ndarray of shape `(n_classes)` with class prevalence estimates according to the best model found
            by the model selection process.
        """
        assert hasattr(self, 'best_model_'), 'quantify called before fit'
        return self.best_model().quantify(instances)

    @property
    def classes_(self):
        """Classes on which the quantifier has been trained.

        :return: a ndarray of shape `(n_classes)` with the class identifiers
        """
        return self.best_model().classes_

    def set_params(self, **parameters):
        """Sets the hyper-parameters to explore.

        :param parameters: a dictionary with keys the parameter names and values the list of values to explore
        """
        self.param_grid = parameters

    def get_params(self, deep=True):
        """Returns the dictionary of hyper-parameters to explore (`param_grid`)

        :param deep: Unused
        :return: the dictionary `param_grid`
        """
        return self.param_grid

    def best_model(self):
        """Returns the best model found after calling the :meth:`fit` method, i.e., the one trained on the
        combination of hyper-parameters that minimized the error function.

        :return: a trained quantifier
        """
        if hasattr(self, 'best_model_'):
            return self.best_model_
        raise ValueError('best_model called before fit')
Ignored if", "to test the performance of the different settings, or a", "the hyperparameters configurations being tested. Whenever a run takes longer", "error elif isinstance(error, str): self.error = qp.error.from_name(error) elif hasattr(error, '__call__'):", "@property def classes_(self): \"\"\" Classes on which the quantifier has", "either be a LabelledCollection or a float in (0,1) indicating", "sample instances along with their true prevalence at each iteration", "set. :param verbose: set to True to get information through", "val_split) if self.protocol != 'gen': self.sample_size = _check_sample_size(self.sample_size) params_keys =", "a float in [0,1] indicating the proportion of labelled data", "longer than this timer, that configuration will be ignored. If", "None, n_repetitions: int = 1, eval_budget: int = None, error:", "val_split) return self def quantify(self, instances): \"\"\"Estimate class prevalence values", "on which the quantifier has been trained on. :return: a", "- validation, random_state=self.random_seed) return training, validation elif self.protocol=='gen' and inspect.isgenerator(validation()):", "Unused :return: the dictionary `param_grid` \"\"\" return self.param_grid def best_model(self):", "list of values to explore :param sample_size: the size of", "__init__(self, model: BaseQuantifier, param_grid: dict, sample_size: Union[int, None] = None,", "protocol!='app'. :param n_repetitions: the number of repetitions for each combination", "protocol.lower() self.n_prevpoints = n_prevpoints self.n_repetitions = n_repetitions self.eval_budget = eval_budget", "if self.protocol != 'gen': self.sample_size = _check_sample_size(self.sample_size) params_keys = list(self.param_grid.keys())", "function. :return: a trained quantifier \"\"\" if hasattr(self, 'best_model_'): return", "for each hyper-parameter combination. For example, if protocol='app', there are", "whether or not to refit the model on the whole", "artificial_prevalence_prediction( model, val_split, self.sample_size, n_prevpoints=self.n_prevpoints, eval_budget=self.eval_budget, **commons ) elif self.protocol", "get_params(self, deep=True): \"\"\"Returns the dictionary of hyper-parameters to explore (`param_grid`)", "0: def handler(signum, frame): self._sout('timeout reached') raise TimeoutError() signal.signal(signal.SIGALRM, handler)", "deepcopy(model) self.param_scores_[str(params)] = score if self.timeout > 0: signal.alarm(0) except", "eval_budget for setting a maximum number of samples in those", "score {score:.5f}') if self.best_score_ is None or score < self.best_score_:", "requested. Ignored if protocol!='app'. :param n_repetitions: the number of repetitions", "on an evaluation protocol for quantification. :param model: the quantifier", "parameters: a dictionary with keys the parameter names and values", "BaseQuantifier :param param_grid: a dictionary with keys the parameter names", "signal.alarm(self.timeout) try: # overrides default parameters with the parameters being", "from copy import deepcopy from typing import Union, Callable import", ":param instances: sample contanining the instances :return: a ndarray of", "the list of values to explore :param sample_size: the size", "of hyper-parameters to explore (`param_grid`) :param deep: Unused :return: the", "if the generator yields less samples. :param error: an error", "TimeoutError() signal.signal(signal.SIGALRM, handler) self.param_scores_ = {} self.best_score_ = None some_timeouts", "\\ 'protocols. 
Use protocol=\"gen\" when passing a generator function thorough", "quapy as qp from quapy.data.base import LabelledCollection from quapy.evaluation import", "self.random_seed = random_seed self.timeout = timeout self.verbose = verbose self.__check_error(error)", "be generated using the value assigned to n_prevpoints (for the", "takes longer than this timer, that configuration will be ignored.", "will be ignored. If all configurations end up being ignored,", "less samples. :param error: an error function (callable) or a", "indicating the proportion of labelled data to extract from the", "each of the hyperparameters configurations being tested. Whenever a run", "validation = training.split_stratified(train_prop=1 - validation, random_state=self.random_seed) return training, validation elif", "class GridSearchQ(BaseQuantifier): \"\"\"Grid Search optimization targeting a quantification-oriented metric. Optimizes", "number of combinations that would be generated using the value", "ignored for the protocol='app' if eval_budget is set and is", "function (callable) or a string indicating the name of an", "= { 'n_repetitions': self.n_repetitions, 'n_jobs': self.n_jobs, 'random_seed': self.random_seed, 'verbose': False", "their true prevalence at each iteration by ' f'setting protocol=\"gen\".')", "0. < validation < 1., 'validation proportion should be in", "random_seed self.timeout = timeout self.verbose = verbose self.__check_error(error) assert self.protocol", "'npp', 'gen'}, \\ 'unknown protocol: valid ones are \"app\" or", "will be generated if the generator yields less samples. :param", "setting a maximum number of samples in those cases). :param", "self.best_params_ = params self.best_model_ = deepcopy(model) self.param_scores_[str(params)] = score if", "jobs :param random_seed: set the seed of the random generator", "model self.param_grid = param_grid self.sample_size = sample_size self.protocol = protocol.lower()", "elif hasattr(error, '__call__'): self.error = error else: raise ValueError(f'unexpected error", "0.25, 0.75], [0, 0.5, 0.5] ... [1, 0, 0], and", "string indicating the name of an error function (valid ones", "\"artificial\" or the \"natural\" prevalence ' \\ 'protocols. Use protocol=\"gen\"", "def _sout(self, msg): if self.verbose: print(f'[{self.__class__.__name__}]: {msg}') def __check_training_validation(self, training,", "a maximum number of samples in those cases). :param eval_budget:", "proportion of labelled data to extract from the training set", "the generator yields less samples. :param error: an error function", "{self.error.__name__} score {score:.5f}') if self.best_score_ is None or score <", "lower than the number of combinations that would be generated", "protocol') def _sout(self, msg): if self.verbose: print(f'[{self.__class__.__name__}]: {msg}') def __check_training_validation(self,", "order to define the prevalences of the samples; e.g., if", "found by the model selection process. 
\"\"\" assert hasattr(self, 'best_model_'),", "n_repetitions self.eval_budget = eval_budget self.refit = refit self.val_split = val_split", "different settings, or a float in [0,1] indicating the proportion", "prevalence protocol, 'npp' for the natural prevalence protocol, or 'gen'", ":param training: the training set on which to optimize the", "list(self.param_grid.values()) model = self.model if self.timeout > 0: def handler(signum,", "error function in {qp.error.QUANTIFICATION_ERROR_NAMES}') def __generate_predictions(self, model, val_split): commons =", "Union[LabelledCollection, float, Callable] = None): \"\"\" Learning routine. Fits methods", "if hasattr(self, 'best_model_'): return self.best_model_ raise ValueError('best_model called before fit')", "'n_repetitions': self.n_repetitions, 'n_jobs': self.n_jobs, 'random_seed': self.random_seed, 'verbose': False } if", "is set and is lower than the number of combinations", "artificial prevalence protocol, 'npp' for the natural prevalence protocol, or", "qp from quapy.data.base import LabelledCollection from quapy.evaluation import artificial_prevalence_prediction, natural_prevalence_prediction,", "from quapy.method.aggregative import BaseQuantifier import inspect from util import _check_sample_size", "of repetitions for each combination of prevalences. This parameter is", "_sout(self, msg): if self.verbose: print(f'[{self.__class__.__name__}]: {msg}') def __check_training_validation(self, training, validation):", "isinstance(validation, float): assert 0. < validation < 1., 'validation proportion", "from the training set :return: self \"\"\" if val_split is", "ones are \"app\" or \"npp\" for the \"artificial\" or the", ":param refit: whether or not to refit the model on", "to n_prevpoints (for the current number of classes and n_repetitions).", "maximum number of samples in those cases). 
:param eval_budget: if", "for config {params}') some_timeouts = True if self.best_score_ is None", "prevalences of the samples; e.g., if n_prevpoints=5, then the prevalences", "params {self.best_params_} (score={self.best_score_:.5f})') if self.refit: self._sout(f'refitting on the whole development", "using the best model found after calling the :meth:`fit` method.", "0: signal.alarm(self.timeout) try: # overrides default parameters with the parameters", "labelled data to extract from the training set :return: self", "best model found after calling the :meth:`fit` method, i.e., the", "gen_prevalence_prediction from quapy.method.aggregative import BaseQuantifier import inspect from util import", "size of the samples to extract from the validation set", "self.eval_budget is None or isinstance(self.eval_budget, int) if self.protocol in ['npp',", "n_prevpoints will be set to 5, since this will generate", "of the samples; e.g., if n_prevpoints=5, then the prevalences for", "refit self.val_split = val_split self.n_jobs = n_jobs self.random_seed = random_seed", "itertools import signal from copy import deepcopy from typing import", "self.n_repetitions, 'n_jobs': self.n_jobs, 'random_seed': self.random_seed, 'verbose': False } if self.protocol", "val_split, self.sample_size, n_prevpoints=self.n_prevpoints, eval_budget=self.eval_budget, **commons ) elif self.protocol == 'npp':", "sample contanining the instances :return: a ndarray of shape `(n_classes)`", "handler(signum, frame): self._sout('timeout reached') raise TimeoutError() signal.signal(signal.SIGALRM, handler) self.param_scores_ =", "_check_sample_size class GridSearchQ(BaseQuantifier): \"\"\"Grid Search optimization targeting a quantification-oriented metric.", "one minimizing the error metric. :param training: the training set", "return artificial_prevalence_prediction( model, val_split, self.sample_size, n_prevpoints=self.n_prevpoints, eval_budget=self.eval_budget, **commons ) elif", "== 'npp': return natural_prevalence_prediction( model, val_split, self.sample_size, **commons) elif self.protocol", "Whenever a run takes longer than this timer, that configuration", "0], and since setting it to 6 would generate more", ":param parameters: a dictionary with keys the parameter names and", "the interval [0,1] in order to define the prevalences of", "an error function (valid ones are those in qp.error.QUANTIFICATION_ERROR :param", "False } if self.protocol == 'app': return artificial_prevalence_prediction( model, val_split,", "a generator that yields ' f'the sample instances along with", "for values in itertools.product(*params_values): params = dict({k: values[i] for i,", "= list(self.param_grid.keys()) params_values = list(self.param_grid.values()) model = self.model if self.timeout", "stdout \"\"\" def __init__(self, model: BaseQuantifier, param_grid: dict, sample_size: Union[int,", "error function (valid ones are those in qp.error.QUANTIFICATION_ERROR :param refit:", "at this iteration model.set_params(**params) model.fit(training) true_prevalences, estim_prevalences = self.__generate_predictions(model, val_split)", "points to extract from the interval [0,1] in order to", "'best_model_'), 'quantify called before fit' return self.best_model().quantify(instances) @property def classes_(self):", "function each time it is invoked (only for protocol='gen'). :param", "GridSearchQ(BaseQuantifier): \"\"\"Grid Search optimization targeting a quantification-oriented metric. 
Optimizes the", "equally distant points to extract from the interval [0,1] in", "to extract from the training set :return: self \"\"\" if", "Learning routine. Fits methods with all combinations of hyperparameters and", "is None and some_timeouts: raise TimeoutError('all jobs took more than", "If -1 (default) then no time bound is set. :param", "of classes and n_repetitions). Ignored for protocol='npp' and protocol='gen' (use", "+ val_split) return self def quantify(self, instances): \"\"\"Estimate class prevalence", "frame): self._sout('timeout reached') raise TimeoutError() signal.signal(signal.SIGALRM, handler) self.param_scores_ = {}", "set, or a callable returning a generator function each time", "self.eval_budget <= 0): raise ValueError(f'when protocol=\"npp\" the parameter eval_budget should", "self def quantify(self, instances): \"\"\"Estimate class prevalence values using the", "\"\"\" return self.best_model().classes_ def set_params(self, **parameters): \"\"\"Sets the hyper-parameters to", "by the model selection process. \"\"\" assert hasattr(self, 'best_model_'), 'quantify", "return self.best_model().classes_ def set_params(self, **parameters): \"\"\"Sets the hyper-parameters to explore.", "0, 1], [0, 0.25, 0.75], [0, 0.5, 0.5] ... [1,", "thorough val_split that yields a ' \\ 'sample (instances) and", "from the interval [0,1] in order to define the prevalences", "_check_sample_size(self.sample_size) params_keys = list(self.param_grid.keys()) params_values = list(self.param_grid.values()) model = self.model", "parameter names and values the list of values to explore", "params_values = list(self.param_grid.values()) model = self.model if self.timeout > 0:", "the one trained on the combination of hyper-parameters that minimized", "ones are those in qp.error.QUANTIFICATION_ERROR :param refit: whether or not", "if self.best_score_ is None or score < self.best_score_: self.best_score_ =", "of samples to generate, but less samples will be generated", "targeting a quantification-oriented metric. Optimizes the hyperparameters of a quantification", "= self.error(true_prevalences, estim_prevalences) self._sout(f'checking hyperparams={params} got {self.error.__name__} score {score:.5f}') if", "yields less samples. :param error: an error function (callable) or", "= verbose self.__check_error(error) assert self.protocol in {'app', 'npp', 'gen'}, \\", "enumerate(params_keys)}) if self.timeout > 0: signal.alarm(self.timeout) try: # overrides default", "\\ 'sample (instances) and their prevalence (ndarray) at each iteration.'", "eval_budget: if specified, sets a ceil on the number of", "end') self._sout(f'optimization finished: best params {self.best_params_} (score={self.best_score_:.5f})') if self.refit: self._sout(f'refitting", "'random_seed': self.random_seed, 'verbose': False } if self.protocol == 'app': return", "\"\"\" Returns the best model found after calling the :meth:`fit`", "chosen hyperparameter combination. Ignored if protocol='gen' :param val_split: either a", ":param timeout: establishes a timer (in seconds) for each of", "3 classes, n_repetitions=1 and eval_budget=20, then n_prevpoints will be set", "the training set on which to optimize the hyperparameters :param", "configuration will be ignored. If all configurations end up being", "protocl='gen') :param protocol: either 'app' for the artificial prevalence protocol,", "the best model found by the model selection process. 
\"\"\"", "qp.error.mae, refit=True, val_split=0.4, n_jobs=1, random_seed=42, timeout=-1, verbose=False): self.model = model", "then the prevalences for each class will be explored in", "is ignored for the protocol='app' if eval_budget is set and", "> 0: signal.alarm(0) except TimeoutError: print(f'timeout reached for config {params}')", ":param param_grid: a dictionary with keys the parameter names and", "a custom sampling generator function :param n_prevpoints: if specified, indicates", "an evaluation method and on an evaluation protocol for quantification.", "**commons) elif self.protocol == 'gen': return gen_prevalence_prediction(model, gen_fn=val_split, eval_budget=self.eval_budget) else:", "hyper-parameters that minimized the error function. :return: a trained quantifier", "= self.__check_training_validation(training, val_split) if self.protocol != 'gen': self.sample_size = _check_sample_size(self.sample_size)", "is raised. If -1 (default) then no time bound is", "self.error = error elif isinstance(error, str): self.error = qp.error.from_name(error) elif", "should be in (0,1)' training, validation = training.split_stratified(train_prop=1 - validation,", "'app' for the artificial prevalence protocol, 'npp' for the natural", "one trained on the combination of hyper-parameters that minimized the", "def handler(signum, frame): self._sout('timeout reached') raise TimeoutError() signal.signal(signal.SIGALRM, handler) self.param_scores_", "[0,1] in order to define the prevalences of the samples;", "fit' return self.best_model().quantify(instances) @property def classes_(self): \"\"\" Classes on which", "is requested. Ignored if protocol!='app'. :param n_repetitions: the number of", "Union[Callable, str] = qp.error.mae, refit=True, val_split=0.4, n_jobs=1, random_seed=42, timeout=-1, verbose=False):", "artificial_prevalence_prediction, natural_prevalence_prediction, gen_prevalence_prediction from quapy.method.aggregative import BaseQuantifier import inspect from", "those cases). :param eval_budget: if specified, sets a ceil on", "True if self.best_score_ is None and some_timeouts: raise TimeoutError('all jobs", "training set, or a callable returning a generator function each", "i.e., the one trained on the combination of hyper-parameters that", "a ndarray of shape `(n_classes)` with class prevalence estimates as", "\"\"\"Grid Search optimization targeting a quantification-oriented metric. Optimizes the hyperparameters", "return self.param_grid def best_model(self): \"\"\" Returns the best model found", ":param protocol: either 'app' for the artificial prevalence protocol, 'npp'", "Callable import numpy as np import quapy as qp from", "\"\"\" self.param_grid = parameters def get_params(self, deep=True): \"\"\"Returns the dictionary", "commons = { 'n_repetitions': self.n_repetitions, 'n_jobs': self.n_jobs, 'random_seed': self.random_seed, 'verbose':", "callable returning a generator function each time it is invoked", "set') self.best_model_.fit(training + val_split) return self def quantify(self, instances): \"\"\"Estimate", "copy import deepcopy from typing import Union, Callable import numpy", "returning a generator that yields ' f'the sample instances along", "be a callable function or a str representing\\n' f'the name", "process. 
\"\"\" assert hasattr(self, 'best_model_'), 'quantify called before fit' return", "eval_budget is set and is lower than the number of", "with the parameters being explored at this iteration model.set_params(**params) model.fit(training)", "prevalences for each class will be explored in [0.00, 0.25,", "iteration by ' f'setting protocol=\"gen\".') def __check_error(self, error): if error", "to explore \"\"\" self.param_grid = parameters def get_params(self, deep=True): \"\"\"Returns", "if protocol!='app'. :param n_repetitions: the number of repetitions for each", "LabelledCollection on which to test the performance of the different", "params = dict({k: values[i] for i, k in enumerate(params_keys)}) if", "of samples in those cases). :param eval_budget: if specified, sets", "Optimizes the hyperparameters of a quantification method, based on an", "trained on. :return: a ndarray of shape `(n_classes)` with the", "self.param_grid def best_model(self): \"\"\" Returns the best model found after", "on which to optimize the hyperparameters :param val_split: either a", "model: BaseQuantifier, param_grid: dict, sample_size: Union[int, None] = None, protocol='app',", "number of repetitions for each combination of prevalences. This parameter", "0.25, 0.50, 0.75, 1.00]. If not specified, then eval_budget is", "(valid ones are those in qp.error.QUANTIFICATION_ERROR :param refit: whether or", "= dict({k: values[i] for i, k in enumerate(params_keys)}) if self.timeout", "classes, n_repetitions=1 and eval_budget=20, then n_prevpoints will be set to", "replicate experiments. Ignored if protocol='gen'. :param timeout: establishes a timer", "callable function returning a generator that yields ' f'the sample", "hyperparams={params} got {self.error.__name__} score {score:.5f}') if self.best_score_ is None or", "values the list of values to explore \"\"\" self.param_grid =", "the hyperparameters :param val_split: either a LabelledCollection on which to", "f'proportion of training documents to extract (type found: {type(validation)}). '", "best model found after calling the :meth:`fit` method. :param instances:", "BaseQuantifier, param_grid: dict, sample_size: Union[int, None] = None, protocol='app', n_prevpoints:", "self.n_jobs = n_jobs self.random_seed = random_seed self.timeout = timeout self.verbose", "is lower than the number of combinations that would be", "best_model(self): \"\"\" Returns the best model found after calling the", "self.val_split = val_split self.n_jobs = n_jobs self.random_seed = random_seed self.timeout", "either a LabelledCollection on which to test the performance of", "shape `(n_classes)` with class prevalence estimates as according to the", "method. :param instances: sample contanining the instances :return: a ndarray", "eval_budget should be ' f'indicated (and should be >0).') if", "this iteration model.set_params(**params) model.fit(training) true_prevalences, estim_prevalences = self.__generate_predictions(model, val_split) score", "reached for config {params}') some_timeouts = True if self.best_score_ is", "the name of an error function (valid ones are those", "combinations that would be generated using the value assigned to", "of the hyperparameters configurations being tested. Whenever a run takes", "to replicate experiments. Ignored if protocol='gen'. 
:param timeout: establishes a", "self.error = qp.error.from_name(error) elif hasattr(error, '__call__'): self.error = error else:", "timeout: establishes a timer (in seconds) for each of the", "their prevalence (ndarray) at each iteration.' assert self.eval_budget is None", "overrides default parameters with the parameters being explored at this", "of the samples to extract from the validation set (ignored", "hyperparameter combination. Ignored if protocol='gen' :param val_split: either a LabelledCollection", "of combinations that would be generated using the value assigned", "hyperparameters :param val_split: either a LabelledCollection on which to test", "\"\"\"Sets the hyper-parameters to explore. :param parameters: a dictionary with", "the maximum number of samples to generate, but less samples", "of hyper-parameters that minimized the error function. :return: a trained", "optimize :type model: BaseQuantifier :param param_grid: a dictionary with keys", "best chosen hyperparameter combination. Ignored if protocol='gen' :param val_split: either", "to 5, since this will generate 15 different prevalences, i.e.,", "specified, sets a ceil on the number of evaluations to", "self.best_score_ = score self.best_params_ = params self.best_model_ = deepcopy(model) self.param_scores_[str(params)]", "optimize the hyperparameters :param val_split: either a LabelledCollection on which", "prevalence at each iteration by ' f'setting protocol=\"gen\".') def __check_error(self,", "representing\\n' f'the name of an error function in {qp.error.QUANTIFICATION_ERROR_NAMES}') def", "model, val_split, self.sample_size, **commons) elif self.protocol == 'gen': return gen_prevalence_prediction(model,", "= self.model if self.timeout > 0: def handler(signum, frame): self._sout('timeout", "example, if protocol='app', there are 3 classes, n_repetitions=1 and eval_budget=20,", "the artificial prevalence protocol, 'npp' for the natural prevalence protocol,", "'unknown protocol: valid ones are \"app\" or \"npp\" for the", "assigned to n_prevpoints (for the current number of classes and", "else: raise ValueError('unknown protocol') def fit(self, training: LabelledCollection, val_split: Union[LabelledCollection,", "for the \"artificial\" or the \"natural\" prevalence ' \\ 'protocols.", "'verbose': False } if self.protocol == 'app': return artificial_prevalence_prediction( model,", "{'app', 'npp', 'gen'}, \\ 'unknown protocol: valid ones are \"app\"", "a float in (0,1) indicating the' f'proportion of training documents", "iteration.' assert self.eval_budget is None or isinstance(self.eval_budget, int) if self.protocol", "1: print('[warning] n_repetitions has been set and will be ignored", "true prevalence at each iteration by ' f'setting protocol=\"gen\".') def", "BaseQuantifier import inspect from util import _check_sample_size class GridSearchQ(BaseQuantifier): \"\"\"Grid", "!= 'gen': self.sample_size = _check_sample_size(self.sample_size) params_keys = list(self.param_grid.keys()) params_values =", "quantification-oriented metric. Optimizes the hyperparameters of a quantification method, based", "up being ignored, a TimeoutError exception is raised. If -1", "generate more than 20. When protocol='gen', indicates the maximum number", "to get information through the stdout \"\"\" def __init__(self, model:", "error metric. 
:param training: the training set on which to", "generator function :param n_prevpoints: if specified, indicates the number of", "at each iteration by ' f'setting protocol=\"gen\".') def __check_error(self, error):", "is set. :param verbose: set to True to get information", "float in [0,1] indicating the proportion of labelled data to", "validation set (ignored if protocl='gen') :param protocol: either 'app' for", "interval [0,1] in order to define the prevalences of the", "i.e., [0, 0, 1], [0, 0.25, 0.75], [0, 0.5, 0.5]", "from the training set, or a callable returning a generator", "combination of hyper-parameters that minimized the error function. :return: a", "= None): \"\"\" Learning routine. Fits methods with all combinations", "the current number of classes and n_repetitions). Ignored for protocol='npp'", "combination. Ignored if protocol='gen' :param val_split: either a LabelledCollection on", "def quantify(self, instances): \"\"\"Estimate class prevalence values using the best", "than the timeout time to end') self._sout(f'optimization finished: best params", "are \"app\" or \"npp\" for the \"artificial\" or the \"natural\"", "def best_model(self): \"\"\" Returns the best model found after calling", "(ndarray) at each iteration.' assert self.eval_budget is None or isinstance(self.eval_budget,", "are 3 classes, n_repetitions=1 and eval_budget=20, then n_prevpoints will be", "assert self.protocol in {'app', 'npp', 'gen'}, \\ 'unknown protocol: valid", "be generated if the generator yields less samples. :param error:", "= timeout self.verbose = verbose self.__check_error(error) assert self.protocol in {'app',", "0.50, 0.75, 1.00]. If not specified, then eval_budget is requested.", "all configurations end up being ignored, a TimeoutError exception is", "params self.best_model_ = deepcopy(model) self.param_scores_[str(params)] = score if self.timeout >", "refit: whether or not to refit the model on the", "import itertools import signal from copy import deepcopy from typing", "`(n_classes)` with the class identifiers \"\"\" return self.best_model().classes_ def set_params(self,", "self._sout(f'optimization finished: best params {self.best_params_} (score={self.best_score_:.5f})') if self.refit: self._sout(f'refitting on", "except TimeoutError: print(f'timeout reached for config {params}') some_timeouts = True", "a quantification-oriented metric. Optimizes the hyperparameters of a quantification method,", "training, validation = training.split_stratified(train_prop=1 - validation, random_state=self.random_seed) return training, validation", "0.5] ... [1, 0, 0], and since setting it to", "on the whole development set') self.best_model_.fit(training + val_split) return self", "if eval_budget is set and is lower than the number", "0, 0], and since setting it to 6 would generate", "{} self.best_score_ = None some_timeouts = False for values in", "if protocol='gen' :param val_split: either a LabelledCollection on which to", "end up being ignored, a TimeoutError exception is raised. If", "random_state=self.random_seed) return training, validation elif self.protocol=='gen' and inspect.isgenerator(validation()): return training,", "has been trained on. :return: a ndarray of shape `(n_classes)`", "with the class identifiers \"\"\" return self.best_model().classes_ def set_params(self, **parameters):", "explored in [0.00, 0.25, 0.50, 0.75, 1.00]. 
If not specified,", "timeout self.verbose = verbose self.__check_error(error) assert self.protocol in {'app', 'npp',", "self.protocol == 'npp': return natural_prevalence_prediction( model, val_split, self.sample_size, **commons) elif", "generate 15 different prevalences, i.e., [0, 0, 1], [0, 0.25,", "random generator to replicate experiments. Ignored if protocol='gen'. :param timeout:", "LabelledCollection): return training, validation elif isinstance(validation, float): assert 0. <", "function :param n_prevpoints: if specified, indicates the number of equally", "the samples to extract from the validation set (ignored if", "and since setting it to 6 would generate more than", "self.protocol != 'gen': self.sample_size = _check_sample_size(self.sample_size) params_keys = list(self.param_grid.keys()) params_values", "self.protocol == 'app': return artificial_prevalence_prediction( model, val_split, self.sample_size, n_prevpoints=self.n_prevpoints, eval_budget=self.eval_budget,", "training documents to extract (type found: {type(validation)}). ' f'Optionally, \"validation\"", "\"\"\"Estimate class prevalence values using the best model found after", "be >0).') if self.n_repetitions != 1: print('[warning] n_repetitions has been", "should be >0).') if self.n_repetitions != 1: print('[warning] n_repetitions has", "calling the :meth:`fit` method, i.e., the one trained on the", "(instances) and their prevalence (ndarray) at each iteration.' assert self.eval_budget", "the number of combinations that would be generated using the", "self.__check_error(error) assert self.protocol in {'app', 'npp', 'gen'}, \\ 'unknown protocol:", "seconds) for each of the hyperparameters configurations being tested. Whenever", "n_prevpoints: if specified, indicates the number of equally distant points", "dictionary of hyper-parameters to explore (`param_grid`) :param deep: Unused :return:", "__check_error(self, error): if error in qp.error.QUANTIFICATION_ERROR: self.error = error elif", "self.model = model self.param_grid = param_grid self.sample_size = sample_size self.protocol", "samples to generate, but less samples will be generated if", "deep: Unused :return: the dictionary `param_grid` \"\"\" return self.param_grid def", "str): self.error = qp.error.from_name(error) elif hasattr(error, '__call__'): self.error = error", "\"\"\" if hasattr(self, 'best_model_'): return self.best_model_ raise ValueError('best_model called before", "should be ' f'indicated (and should be >0).') if self.n_repetitions", "in order to define the prevalences of the samples; e.g.,", "val_split: Union[LabelledCollection, float, Callable] = None): \"\"\" Learning routine. Fits", "the model on the whole labelled collection (training+validation) with the", "n_prevpoints=5, then the prevalences for each class will be explored", "the \"artificial\" or the \"natural\" prevalence ' \\ 'protocols. 
Use", "of an error function in {qp.error.QUANTIFICATION_ERROR_NAMES}') def __generate_predictions(self, model, val_split):", "= None, protocol='app', n_prevpoints: int = None, n_repetitions: int =", "i, k in enumerate(params_keys)}) if self.timeout > 0: signal.alarm(self.timeout) try:", "else: raise ValueError(f'unexpected error type; must either be a callable", "in qp.error.QUANTIFICATION_ERROR: self.error = error elif isinstance(error, str): self.error =", "generate, but less samples will be generated if the generator", "custom sampling generator function :param n_prevpoints: if specified, indicates the", "error function (callable) or a string indicating the name of", "(in seconds) for each of the hyperparameters configurations being tested.", "= score if self.timeout > 0: signal.alarm(0) except TimeoutError: print(f'timeout", "distant points to extract from the interval [0,1] in order", "self.protocol in ['npp', 'gen']: if self.protocol=='npp' and (self.eval_budget is None", "self.best_model().quantify(instances) @property def classes_(self): \"\"\" Classes on which the quantifier", "Union[int, None] = None, protocol='app', n_prevpoints: int = None, n_repetitions:", ":param n_jobs: number of parallel jobs :param random_seed: set the", ":return: the dictionary `param_grid` \"\"\" return self.param_grid def best_model(self): \"\"\"", "some_timeouts = True if self.best_score_ is None and some_timeouts: raise", "= refit self.val_split = val_split self.n_jobs = n_jobs self.random_seed =", "validation elif isinstance(validation, float): assert 0. < validation < 1.,", "val_split: either a LabelledCollection on which to test the performance", "None] = None, protocol='app', n_prevpoints: int = None, n_repetitions: int", "protocol='app' if eval_budget is set and is lower than the", "protocol: valid ones are \"app\" or \"npp\" for the \"artificial\"", "' \\ 'protocols. Use protocol=\"gen\" when passing a generator function", "None or self.eval_budget <= 0): raise ValueError(f'when protocol=\"npp\" the parameter", "= deepcopy(model) self.param_scores_[str(params)] = score if self.timeout > 0: signal.alarm(0)", "model, val_split): commons = { 'n_repetitions': self.n_repetitions, 'n_jobs': self.n_jobs, 'random_seed':", "'gen'}, \\ 'unknown protocol: valid ones are \"app\" or \"npp\"", ":return: a ndarray of shape `(n_classes)` with class prevalence estimates", "iteration model.set_params(**params) model.fit(training) true_prevalences, estim_prevalences = self.__generate_predictions(model, val_split) score =", "prevalence (ndarray) at each iteration.' assert self.eval_budget is None or", "establishes a timer (in seconds) for each of the hyperparameters", "of the different settings, or a float in [0,1] indicating", "verbose: set to True to get information through the stdout", "for the artificial prevalence protocol, 'npp' for the natural prevalence", "training, validation elif isinstance(validation, float): assert 0. < validation <", "... [1, 0, 0], and since setting it to 6", "n_jobs self.random_seed = random_seed self.timeout = timeout self.verbose = verbose", "protocol for quantification. :param model: the quantifier to optimize :type", "all combinations of hyperparameters and selects the one minimizing the", "= model self.param_grid = param_grid self.sample_size = sample_size self.protocol =", "self.refit = refit self.val_split = val_split self.n_jobs = n_jobs self.random_seed", "a LabelledCollection or a float in (0,1) indicating the' f'proportion", "[0, 0.25, 0.75], [0, 0.5, 0.5] ... 
[1, 0, 0],", "self.n_repetitions = n_repetitions self.eval_budget = eval_budget self.refit = refit self.val_split", "as qp from quapy.data.base import LabelledCollection from quapy.evaluation import artificial_prevalence_prediction,", "explore. :param parameters: a dictionary with keys the parameter names", "self.verbose = verbose self.__check_error(error) assert self.protocol in {'app', 'npp', 'gen'},", "test the performance of the different settings, or a float", "trained quantifier \"\"\" if hasattr(self, 'best_model_'): return self.best_model_ raise ValueError('best_model", "float in (0,1) indicating the' f'proportion of training documents to", "score < self.best_score_: self.best_score_ = score self.best_params_ = params self.best_model_", "on which to test the performance of the different settings,", ":param model: the quantifier to optimize :type model: BaseQuantifier :param", "if specified, sets a ceil on the number of evaluations", "cases). :param eval_budget: if specified, sets a ceil on the", "{qp.error.QUANTIFICATION_ERROR_NAMES}') def __generate_predictions(self, model, val_split): commons = { 'n_repetitions': self.n_repetitions,", "the different settings, or a float in [0,1] indicating the", "to True to get information through the stdout \"\"\" def", "in {'app', 'npp', 'gen'}, \\ 'unknown protocol: valid ones are", "must either be a callable function or a str representing\\n'", "model.fit(training) true_prevalences, estim_prevalences = self.__generate_predictions(model, val_split) score = self.error(true_prevalences, estim_prevalences)", "or self.eval_budget <= 0): raise ValueError(f'when protocol=\"npp\" the parameter eval_budget", "in (0,1) indicating the' f'proportion of training documents to extract", "minimized the error function. :return: a trained quantifier \"\"\" if", "This parameter is ignored for the protocol='app' if eval_budget is", "and their prevalence (ndarray) at each iteration.' assert self.eval_budget is", "or 'gen' for using a custom sampling generator function :param", "specified, indicates the number of equally distant points to extract", "return self def quantify(self, instances): \"\"\"Estimate class prevalence values using", "a timer (in seconds) for each of the hyperparameters configurations", "for quantification. :param model: the quantifier to optimize :type model:", "of equally distant points to extract from the interval [0,1]", "hasattr(self, 'best_model_'), 'quantify called before fit' return self.best_model().quantify(instances) @property def", "verbose=False): self.model = model self.param_grid = param_grid self.sample_size = sample_size", "None and some_timeouts: raise TimeoutError('all jobs took more than the", "selection process. \"\"\" assert hasattr(self, 'best_model_'), 'quantify called before fit'", "the instances :return: a ndarray of shape `(n_classes)` with class", "(ignored if protocl='gen') :param protocol: either 'app' for the artificial", "assert 0. < validation < 1., 'validation proportion should be", "that configuration will be ignored. 
If all configurations end up", "= eval_budget self.refit = refit self.val_split = val_split self.n_jobs =", "method, based on an evaluation method and on an evaluation", "has been set and will be ignored for the selected", "settings, or a float in [0,1] indicating the proportion of", "**commons ) elif self.protocol == 'npp': return natural_prevalence_prediction( model, val_split,", "a callable returning a generator function each time it is", "= None, error: Union[Callable, str] = qp.error.mae, refit=True, val_split=0.4, n_jobs=1,", "self.protocol=='npp' and (self.eval_budget is None or self.eval_budget <= 0): raise", "time bound is set. :param verbose: set to True to", "number of parallel jobs :param random_seed: set the seed of", "samples in those cases). :param eval_budget: if specified, sets a", "of the random generator to replicate experiments. Ignored if protocol='gen'.", "generator function each time it is invoked (only for protocol='gen').", "methods with all combinations of hyperparameters and selects the one", "maximum number of samples to generate, but less samples will", "'gen']: if self.protocol=='npp' and (self.eval_budget is None or self.eval_budget <=", "documents to extract (type found: {type(validation)}). ' f'Optionally, \"validation\" can", "estimates as according to the best model found by the", "if isinstance(validation, LabelledCollection): return training, validation elif isinstance(validation, float): assert", "'__call__'): self.error = error else: raise ValueError(f'unexpected error type; must", "qp.error.QUANTIFICATION_ERROR :param refit: whether or not to refit the model", "those in qp.error.QUANTIFICATION_ERROR :param refit: whether or not to refit", "and (self.eval_budget is None or self.eval_budget <= 0): raise ValueError(f'when", "been set and will be ignored for the selected protocol')", "for i, k in enumerate(params_keys)}) if self.timeout > 0: signal.alarm(self.timeout)", ":param val_split: either a LabelledCollection on which to test the", "self.param_scores_ = {} self.best_score_ = None some_timeouts = False for", "the selected protocol') def _sout(self, msg): if self.verbose: print(f'[{self.__class__.__name__}]: {msg}')", "= qp.error.from_name(error) elif hasattr(error, '__call__'): self.error = error else: raise", "eval_budget=self.eval_budget) else: raise ValueError('unknown protocol') def fit(self, training: LabelledCollection, val_split:", "in enumerate(params_keys)}) if self.timeout > 0: signal.alarm(self.timeout) try: # overrides", "True to get information through the stdout \"\"\" def __init__(self,", "prevalences, i.e., [0, 0, 1], [0, 0.25, 0.75], [0, 0.5,", "performance of the different settings, or a float in [0,1]", "be set to 5, since this will generate 15 different", "the whole development set') self.best_model_.fit(training + val_split) return self def", "set and will be ignored for the selected protocol') def", "model found after calling the :meth:`fit` method. 
:param instances: sample", "import quapy as qp from quapy.data.base import LabelledCollection from quapy.evaluation", "list(self.param_grid.keys()) params_values = list(self.param_grid.values()) model = self.model if self.timeout >", "= random_seed self.timeout = timeout self.verbose = verbose self.__check_error(error) assert", "isinstance(self.eval_budget, int) if self.protocol in ['npp', 'gen']: if self.protocol=='npp' and", "function in {qp.error.QUANTIFICATION_ERROR_NAMES}') def __generate_predictions(self, model, val_split): commons = {", "' f'indicated (and should be >0).') if self.n_repetitions != 1:", "either 'app' for the artificial prevalence protocol, 'npp' for the", "val_split=0.4, n_jobs=1, random_seed=42, timeout=-1, verbose=False): self.model = model self.param_grid =", "self.best_model().classes_ def set_params(self, **parameters): \"\"\"Sets the hyper-parameters to explore. :param", "raise ValueError('unknown protocol') def fit(self, training: LabelledCollection, val_split: Union[LabelledCollection, float,", "the size of the samples to extract from the validation", "the quantifier to optimize :type model: BaseQuantifier :param param_grid: a", "time it is invoked (only for protocol='gen'). :param n_jobs: number", "deepcopy from typing import Union, Callable import numpy as np", "less samples will be generated if the generator yields less", "the list of values to explore \"\"\" self.param_grid = parameters", "self.n_prevpoints = n_prevpoints self.n_repetitions = n_repetitions self.eval_budget = eval_budget self.refit", "the :meth:`fit` method. :param instances: sample contanining the instances :return:", "parameter is ignored for the protocol='app' if eval_budget is set", "validation, random_state=self.random_seed) return training, validation elif self.protocol=='gen' and inspect.isgenerator(validation()): return", "identifiers \"\"\" return self.best_model().classes_ def set_params(self, **parameters): \"\"\"Sets the hyper-parameters", "the training set, or a callable returning a generator function", "indicating the name of an error function (valid ones are", "value assigned to n_prevpoints (for the current number of classes", "Ignored for protocol='npp' and protocol='gen' (use eval_budget for setting a", "the stdout \"\"\" def __init__(self, model: BaseQuantifier, param_grid: dict, sample_size:", "bound is set. :param verbose: set to True to get", "model: the quantifier to optimize :type model: BaseQuantifier :param param_grid:", ":type model: BaseQuantifier :param param_grid: a dictionary with keys the", "must either be a LabelledCollection or a float in (0,1)", "(0,1) indicating the' f'proportion of training documents to extract (type", "with class prevalence estimates as according to the best model", "then eval_budget is requested. Ignored if protocol!='app'. :param n_repetitions: the", "None, error: Union[Callable, str] = qp.error.mae, refit=True, val_split=0.4, n_jobs=1, random_seed=42,", "after calling the :meth:`fit` method, i.e., the one trained on", "raise TimeoutError() signal.signal(signal.SIGALRM, handler) self.param_scores_ = {} self.best_score_ = None", "sample_size self.protocol = protocol.lower() self.n_prevpoints = n_prevpoints self.n_repetitions = n_repetitions", "hyperparameters configurations being tested. 
import inspect
import itertools
import signal
from copy import deepcopy
from typing import Union, Callable

import numpy as np

import quapy as qp
from quapy.data.base import LabelledCollection
from quapy.evaluation import artificial_prevalence_prediction, natural_prevalence_prediction, gen_prevalence_prediction
from quapy.method.aggregative import BaseQuantifier
from util import _check_sample_size


class GridSearchQ(BaseQuantifier):
    """Grid Search optimization targeting a quantification-oriented metric.

    Optimizes the hyperparameters of a quantification method, based on an evaluation protocol for quantification.

    :param model: the quantifier to optimize
    :type model: BaseQuantifier
    :param param_grid: a dictionary with keys the parameter names and values the list of values to explore
    :param sample_size: the size of the samples to extract from the validation set (ignored if protocol='gen')
    :param protocol: either 'app' for the artificial prevalence protocol, 'npp' for the natural prevalence
        protocol, or 'gen' for using a custom sampling generator function
    :param n_prevpoints: if specified, indicates the number of equally distant points to extract from the
        interval [0,1] in order to define the prevalences of the samples; e.g., if n_prevpoints=5, then the
        prevalences for each class will be explored in [0.00, 0.25, 0.50, 0.75, 1.00]. If not specified, then
        eval_budget is requested. Ignored if protocol!='app'.
    :param n_repetitions: the number of repetitions for each combination of prevalences. This parameter is
        ignored for the protocol='app' if eval_budget is set and is lower than the number of combinations that
        would be generated using the value assigned to n_prevpoints (for the current number of classes and
        n_repetitions). Ignored for protocol='npp' and protocol='gen' (use eval_budget for setting a maximum
        number of samples in those cases).
    :param eval_budget: if specified, sets a ceil on the number of evaluations to perform for each
        hyper-parameter combination. For example, if protocol='app', there are 3 classes, n_repetitions=1 and
        eval_budget=20, then n_prevpoints will be set to 5, since this will generate 15 different prevalences,
        i.e., [0, 0, 1], [0, 0.25, 0.75], [0, 0.5, 0.5] ... [1, 0, 0], and since setting it to 6 would generate
        more than 20. When protocol='gen', indicates the maximum number of samples to generate, but fewer
        samples will be generated if the generator yields fewer samples.
    :param error: an error function (callable) or a string indicating the name of an error function (valid
        ones are those in qp.error.QUANTIFICATION_ERROR)
    :param refit: whether to refit the model on the whole labelled collection (training+validation) with the
        best chosen hyperparameter combination. Ignored if protocol='gen'
    :param val_split: either a LabelledCollection on which to test the performance of the different settings,
        or a float in [0,1] indicating the proportion of labelled data to extract from the training set, or a
        callable function returning a generator each time it is invoked (only for protocol='gen').
    :param n_jobs: number of parallel jobs
    :param random_seed: set the seed of the random generator to replicate experiments. Ignored if protocol='gen'.
    :param timeout: establishes a timer (in seconds) for each of the hyperparameter configurations being tested.
        Whenever a run takes longer than this timer, that configuration will be ignored. If all configurations
        end up being ignored, a TimeoutError exception is raised. If -1 (default) then no time bound is set.
    :param verbose: set to True to get information through the stdout
    """

    def __init__(self,
                 model: BaseQuantifier,
                 param_grid: dict,
                 sample_size: int = None,
                 protocol='app',
                 n_prevpoints: int = None,
                 n_repetitions: int = 1,
                 eval_budget: int = None,
                 error: Union[Callable, str] = qp.error.mae,
                 refit=True,
                 val_split=0.4,
                 n_jobs=1,
                 random_seed=42,
                 timeout=-1,
                 verbose=False):
        self.model = model
        self.param_grid = param_grid
        self.sample_size = sample_size
        self.protocol = protocol.lower()
        self.n_prevpoints = n_prevpoints
        self.n_repetitions = n_repetitions
        self.eval_budget = eval_budget
        self.refit = refit
        self.val_split = val_split
        self.n_jobs = n_jobs
        self.random_seed = random_seed
        self.timeout = timeout
        self.verbose = verbose
        self.__check_error(error)
        assert self.protocol in {'app', 'npp', 'gen'}, \
            'unknown protocol: valid ones are "app" or "npp" for the "artificial" or the "natural" prevalence ' \
            'protocols. Use protocol="gen" when passing a generator function through val_split that yields a ' \
            'sample (instances) and their prevalence (ndarray) at each iteration.'
        assert self.eval_budget is None or isinstance(self.eval_budget, int)
        if self.protocol in ['npp', 'gen']:
            if self.protocol == 'npp' and (self.eval_budget is None or self.eval_budget <= 0):
                raise ValueError(f'when protocol="npp" the parameter eval_budget should be '
                                 f'indicated (and should be >0).')
            if self.n_repetitions != 1:
                print('[warning] n_repetitions has been set and will be ignored for the selected protocol')

    def _sout(self, msg):
        if self.verbose:
            print(f'[{self.__class__.__name__}]: {msg}')

    def __check_training_validation(self, training, validation):
        if isinstance(validation, LabelledCollection):
            return training, validation
        elif isinstance(validation, float):
            assert 0. < validation < 1., 'validation proportion should be in (0,1)'
            training, validation = training.split_stratified(train_prop=1 - validation,
                                                             random_state=self.random_seed)
            return training, validation
        elif self.protocol == 'gen' and inspect.isgenerator(validation()):
            return training, validation
        else:
            raise ValueError(f'"validation" must either be a LabelledCollection or a float in (0,1) indicating the '
                             f'proportion of training documents to extract (type found: {type(validation)}). '
                             f'Optionally, "validation" can be a callable function returning a generator that yields '
                             f'the sample instances along with their true prevalence at each iteration by '
                             f'setting protocol="gen".')

    def __check_error(self, error):
        if error in qp.error.QUANTIFICATION_ERROR:
            self.error = error
        elif isinstance(error, str):
            self.error = qp.error.from_name(error)
        elif hasattr(error, '__call__'):
            self.error = error
        else:
            raise ValueError(f'unexpected error type; must either be a callable function or a str representing\n'
                             f'the name of an error function in {qp.error.QUANTIFICATION_ERROR_NAMES}')

    def __generate_predictions(self, model, val_split):
        commons = {
            'n_repetitions': self.n_repetitions,
            'n_jobs': self.n_jobs,
            'random_seed': self.random_seed,
            'verbose': False
        }
        if self.protocol == 'app':
            return artificial_prevalence_prediction(
                model, val_split, self.sample_size,
                n_prevpoints=self.n_prevpoints,
                eval_budget=self.eval_budget,
                **commons
            )
        elif self.protocol == 'npp':
            return natural_prevalence_prediction(
                model, val_split, self.sample_size,
                **commons)
        elif self.protocol == 'gen':
            return gen_prevalence_prediction(model, gen_fn=val_split, eval_budget=self.eval_budget)
        else:
            raise ValueError('unknown protocol')

    def fit(self, training: LabelledCollection, val_split: Union[LabelledCollection, float, Callable] = None):
        """ Learning routine. Fits methods with all combinations of hyperparameters and selects the one minimizing
        the error metric.

        :param training: the training set on which to optimize the hyperparameters
        :param val_split: either a LabelledCollection on which to test the performance of the different settings,
            or a float in [0,1] indicating the proportion of labelled data to extract from the training set
        :return: self
        """
        if val_split is None:
            val_split = self.val_split
        training, val_split = self.__check_training_validation(training, val_split)
        if self.protocol != 'gen':
            self.sample_size = _check_sample_size(self.sample_size)

        params_keys = list(self.param_grid.keys())
        params_values = list(self.param_grid.values())

        model = self.model

        if self.timeout > 0:
            def handler(signum, frame):
                self._sout('timeout reached')
                raise TimeoutError()

            signal.signal(signal.SIGALRM, handler)

        self.param_scores_ = {}
        self.best_score_ = None
        some_timeouts = False
        for values in itertools.product(*params_values):
            params = dict({k: values[i] for i, k in enumerate(params_keys)})

            if self.timeout > 0:
                signal.alarm(self.timeout)

            try:
                # overrides default parameters with the parameters being explored at this iteration
                model.set_params(**params)
                model.fit(training)
                true_prevalences, estim_prevalences = self.__generate_predictions(model, val_split)
                score = self.error(true_prevalences, estim_prevalences)

                self._sout(f'checking hyperparams={params} got {self.error.__name__} score {score:.5f}')
                if self.best_score_ is None or score < self.best_score_:
                    self.best_score_ = score
                    self.best_params_ = params
                    self.best_model_ = deepcopy(model)
                self.param_scores_[str(params)] = score

                if self.timeout > 0:
                    signal.alarm(0)
            except TimeoutError:
                print(f'timeout reached for config {params}')
                some_timeouts = True

        if self.best_score_ is None and some_timeouts:
            raise TimeoutError('all jobs took more than the timeout time to end')

        self._sout(f'optimization finished: best params {self.best_params_} (score={self.best_score_:.5f})')

        if self.refit:
            self._sout(f'refitting on the whole development set')
            self.best_model_.fit(training + val_split)

        return self

    def quantify(self, instances):
        """Estimate class prevalence values using the best model found after calling the :meth:`fit` method.

        :param instances: sample containing the instances
        :return: the class prevalence values
        """
        assert hasattr(self, 'best_model_'), 'quantify called before fit'
        return self.best_model().quantify(instances)

    @property
    def classes_(self):
        """
        Classes on which the quantifier has been trained on.

        :return: a ndarray of shape `(n_classes)` with the class identifiers
        """
        return self.best_model().classes_

    def set_params(self, **parameters):
        """Sets the hyper-parameters to explore.

        :param parameters: a dictionary with keys the parameter names and values the list of values to explore
        """
        self.param_grid = parameters

    def get_params(self, deep=True):
        """Returns the dictionary of hyper-parameters to explore (`param_grid`)

        :param deep: Unused
        :return: the dictionary `param_grid`
        """
        return self.param_grid

    def best_model(self):
        """
        Returns the best model found after calling the :meth:`fit` method, i.e., the one trained on the combination
        of hyper-parameters that minimized the error function.

        :return: a trained quantifier
        """
        if hasattr(self, 'best_model_'):
            return self.best_model_
        raise ValueError('best_model called before fit')
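# Usage sketch (an addition, not part of the original module): the dataset
# fetch and the PCC/LogisticRegression choices below are illustrative
# assumptions; the entries of param_grid are forwarded to model.set_params().
if __name__ == '__main__':
    from quapy.method.aggregative import PCC
    from sklearn.linear_model import LogisticRegression

    dataset = qp.datasets.fetch_reviews('kindle', tfidf=True)  # assumed corpus choice
    search = GridSearchQ(
        model=PCC(LogisticRegression()),
        param_grid={'C': [0.1, 1.0, 10.0]},  # explored exhaustively via itertools.product
        sample_size=500,
        protocol='app',
        eval_budget=100,
        error='mae',  # resolved by __check_error through qp.error.from_name
        val_split=0.4,
        verbose=True,
    ).fit(dataset.training)
    print(search.best_params_, search.best_score_)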
[ "migrate 的新建 我们需要扫描到这些文件我们才能创建 @app.shell_context_processor def make_shell_context(): return dict(db=db, User=User, Role=Role,", "flask_migrate import Migrate from app import create_app, db from app.models", "# -*- coding: utf-8 -*- import os from flask_migrate import", "app import create_app, db from app.models import User, Role, PoseToLocation", "的新建 我们需要扫描到这些文件我们才能创建 @app.shell_context_processor def make_shell_context(): return dict(db=db, User=User, Role=Role, PoseToLocation=PoseToLocation)", "make_shell_context(): return dict(db=db, User=User, Role=Role, PoseToLocation=PoseToLocation) # 单元测试 @app.cli.command() def", "db from app.models import User, Role, PoseToLocation app = create_app(os.getenv('FLASK_CONFIG')", "\"\"\" run the unit tests \"\"\" import unittest tests =", "coding: utf-8 -*- import os from flask_migrate import Migrate from", "# migrate 的新建 我们需要扫描到这些文件我们才能创建 @app.shell_context_processor def make_shell_context(): return dict(db=db, User=User,", "return dict(db=db, User=User, Role=Role, PoseToLocation=PoseToLocation) # 单元测试 @app.cli.command() def test():", "= create_app(os.getenv('FLASK_CONFIG') or 'default') migrate = Migrate(app, db) # migrate", "os from flask_migrate import Migrate from app import create_app, db", "create_app, db from app.models import User, Role, PoseToLocation app =", "import User, Role, PoseToLocation app = create_app(os.getenv('FLASK_CONFIG') or 'default') migrate", "# 单元测试 @app.cli.command() def test(): \"\"\" run the unit tests", "test(): \"\"\" run the unit tests \"\"\" import unittest tests", "Migrate from app import create_app, db from app.models import User,", "Migrate(app, db) # migrate 的新建 我们需要扫描到这些文件我们才能创建 @app.shell_context_processor def make_shell_context(): return", "PoseToLocation=PoseToLocation) # 单元测试 @app.cli.command() def test(): \"\"\" run the unit", "def make_shell_context(): return dict(db=db, User=User, Role=Role, PoseToLocation=PoseToLocation) # 单元测试 @app.cli.command()", "from flask_migrate import Migrate from app import create_app, db from", "User=User, Role=Role, PoseToLocation=PoseToLocation) # 单元测试 @app.cli.command() def test(): \"\"\" run", "User, Role, PoseToLocation app = create_app(os.getenv('FLASK_CONFIG') or 'default') migrate =", "单元测试 @app.cli.command() def test(): \"\"\" run the unit tests \"\"\"", "run the unit tests \"\"\" import unittest tests = unittest.TestLoader().discover('tests')", "import os from flask_migrate import Migrate from app import create_app,", "-*- coding: utf-8 -*- import os from flask_migrate import Migrate", "= Migrate(app, db) # migrate 的新建 我们需要扫描到这些文件我们才能创建 @app.shell_context_processor def make_shell_context():", "from app import create_app, db from app.models import User, Role,", "app = create_app(os.getenv('FLASK_CONFIG') or 'default') migrate = Migrate(app, db) #", "def test(): \"\"\" run the unit tests \"\"\" import unittest", "create_app(os.getenv('FLASK_CONFIG') or 'default') migrate = Migrate(app, db) # migrate 的新建", "dict(db=db, User=User, Role=Role, PoseToLocation=PoseToLocation) # 单元测试 @app.cli.command() def test(): \"\"\"", "utf-8 -*- import os from flask_migrate import Migrate from app", "migrate = Migrate(app, db) # migrate 的新建 我们需要扫描到这些文件我们才能创建 @app.shell_context_processor def", "PoseToLocation app = create_app(os.getenv('FLASK_CONFIG') or 'default') migrate = Migrate(app, db)", "the unit tests \"\"\" import unittest tests = unittest.TestLoader().discover('tests') unittest.TextTestRunner(verbosity=2).run(tests)", "Role, PoseToLocation app = 
create_app(os.getenv('FLASK_CONFIG') or 'default') migrate = Migrate(app,", "or 'default') migrate = Migrate(app, db) # migrate 的新建 我们需要扫描到这些文件我们才能创建", "'default') migrate = Migrate(app, db) # migrate 的新建 我们需要扫描到这些文件我们才能创建 @app.shell_context_processor", "-*- import os from flask_migrate import Migrate from app import", "import Migrate from app import create_app, db from app.models import", "我们需要扫描到这些文件我们才能创建 @app.shell_context_processor def make_shell_context(): return dict(db=db, User=User, Role=Role, PoseToLocation=PoseToLocation) #", "@app.shell_context_processor def make_shell_context(): return dict(db=db, User=User, Role=Role, PoseToLocation=PoseToLocation) # 单元测试", "@app.cli.command() def test(): \"\"\" run the unit tests \"\"\" import", "from app.models import User, Role, PoseToLocation app = create_app(os.getenv('FLASK_CONFIG') or", "Role=Role, PoseToLocation=PoseToLocation) # 单元测试 @app.cli.command() def test(): \"\"\" run the", "app.models import User, Role, PoseToLocation app = create_app(os.getenv('FLASK_CONFIG') or 'default')", "db) # migrate 的新建 我们需要扫描到这些文件我们才能创建 @app.shell_context_processor def make_shell_context(): return dict(db=db,", "import create_app, db from app.models import User, Role, PoseToLocation app" ]
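# Usage sketch (an addition, not part of the original script): a test module
# that the `test` command above would discover under tests/; the file name
# tests/test_basics.py and the 'testing' config key are assumptions about the
# project layout.
import unittest
from app import create_app, db


class BasicsTestCase(unittest.TestCase):
    def setUp(self):
        self.app = create_app('testing')  # assumed config name
        self.app_context = self.app.app_context()
        self.app_context.push()
        db.create_all()

    def tearDown(self):
        db.session.remove()
        db.drop_all()
        self.app_context.pop()

    def test_app_exists(self):
        self.assertIsNotNone(self.app)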
[ "- basin) basin.add(point) basins.append(basin) return basins def solve_part2(heightmap: Heightmap, low_points:", "for line in f] heightmap: Heightmap = dict() for (y,", "points_to_consider = {low_point} while points_to_consider: point = points_to_consider.pop() if heightmap[point]", "- 1), (x, y + 1), (x + 1, y),", "if all(heightmap[point] < height for height in surrounding_heights): low_points.add(point) return", "given point \"\"\" surrounding_points = get_surrounding_points(heightmap, point) return {heightmap[point] for", "& heightmap.keys() def get_surrounding_heights(heightmap: Heightmap, point: Point) -> set[int]: \"\"\"", "= [[int(x) for x in line.strip()] for line in f]", "the low points on the heightmap \"\"\" low_points: set[Point] =", "return heightmap def get_surrounding_points(heightmap: Heightmap, point: Point) -> set[Point]: \"\"\"", "y)] = height return heightmap def get_surrounding_points(heightmap: Heightmap, point: Point)", "= point return { (x - 1, y), (x, y", "def get_surrounding_points(heightmap: Heightmap, point: Point) -> set[Point]: \"\"\" Returns a", "surrounding_heights = get_surrounding_heights(heightmap, point) if all(heightmap[point] < height for height", "enumerate(row): heightmap[(x, y)] = height return heightmap def get_surrounding_points(heightmap: Heightmap,", "= set[Point] def parse_input() -> Heightmap: \"\"\" Parses the input", "heightmap = parse_input() low_points = get_low_points(heightmap) part1 = solve_part1(heightmap, low_points)", "get_surrounding_points(heightmap: Heightmap, point: Point) -> set[Point]: \"\"\" Returns a set", "low_point in low_points: basin: Basin = set() points_to_consider = {low_point}", "heightmap \"\"\" x, y = point return { (x -", "parse_input() -> Heightmap: \"\"\" Parses the input and returns a", "a Heightmap \"\"\" with open(INPUT_FILE) as f: heights = [[int(x)", "set[Point]) -> int: \"\"\" Calculates the sum of the risk", "Parses the input and returns a Heightmap \"\"\" with open(INPUT_FILE)", "\"\"\" return sum(1 + heightmap[point] for point in low_points) def", "on the heightmap \"\"\" low_points: set[Point] = set() for point", "points_to_consider: point = points_to_consider.pop() if heightmap[point] == 9: continue surrounding_points", "heightmap def get_surrounding_points(heightmap: Heightmap, point: Point) -> set[Point]: \"\"\" Returns", "-> int: \"\"\" Calculates the product of the sizes of", "low_points: basin: Basin = set() points_to_consider = {low_point} while points_to_consider:", "three largest basins \"\"\" basins = get_basins(heightmap, low_points) basin_sizes =", "surrounding_points = get_surrounding_points(heightmap, point) points_to_consider.update(surrounding_points - basin) basin.add(point) basins.append(basin) return", "low_points) basin_sizes = sorted((len(basin) for basin in basins), reverse=True) return", "set() for point in heightmap: surrounding_heights = get_surrounding_heights(heightmap, point) if", "in basins), reverse=True) return basin_sizes[0] * basin_sizes[1] * basin_sizes[2] if", "in surrounding_points} def get_low_points(heightmap: Heightmap) -> set[Point]: \"\"\" Finds the", "= set() for point in heightmap: surrounding_heights = get_surrounding_heights(heightmap, point)", "point) if all(heightmap[point] < height for height in surrounding_heights): low_points.add(point)", "of all low points \"\"\" return sum(1 + heightmap[point] for", "set[Point]: \"\"\" Returns a set of surrounding points within the", "low_points.add(point) return low_points def solve_part1(heightmap: Heightmap, 
low_points: set[Point]) -> int:", "of the sizes of the three largest basins \"\"\" basins", "the given point \"\"\" surrounding_points = get_surrounding_points(heightmap, point) return {heightmap[point]", "height for height in surrounding_heights): low_points.add(point) return low_points def solve_part1(heightmap:", "-> set[int]: \"\"\" Returns the heights of points surrounding the", "basin.add(point) basins.append(basin) return basins def solve_part2(heightmap: Heightmap, low_points: set[Point]) ->", "sum of the risk levels of all low points \"\"\"", "f] heightmap: Heightmap = dict() for (y, row) in enumerate(heights):", "Finds all basins on the heightmap \"\"\" basins: list[Basin] =", "the three largest basins \"\"\" basins = get_basins(heightmap, low_points) basin_sizes", "return { (x - 1, y), (x, y - 1),", "surrounding_points = get_surrounding_points(heightmap, point) return {heightmap[point] for point in surrounding_points}", "heightmap: Heightmap = dict() for (y, row) in enumerate(heights): for", "= tuple[int, int] Heightmap = dict[Point, int] Basin = set[Point]", "basins), reverse=True) return basin_sizes[0] * basin_sizes[1] * basin_sizes[2] if __name__", "= get_surrounding_points(heightmap, point) return {heightmap[point] for point in surrounding_points} def", "= get_surrounding_points(heightmap, point) points_to_consider.update(surrounding_points - basin) basin.add(point) basins.append(basin) return basins", "Returns the heights of points surrounding the given point \"\"\"", "set[Point]) -> list[Basin]: \"\"\" Finds all basins on the heightmap", "= get_basins(heightmap, low_points) basin_sizes = sorted((len(basin) for basin in basins),", "of the three largest basins \"\"\" basins = get_basins(heightmap, low_points)", "low_points: set[Point] = set() for point in heightmap: surrounding_heights =", "basin in basins), reverse=True) return basin_sizes[0] * basin_sizes[1] * basin_sizes[2]", "\"\"\" Finds the low points on the heightmap \"\"\" low_points:", "list[Basin]: \"\"\" Finds all basins on the heightmap \"\"\" basins:", "the sum of the risk levels of all low points", "= {low_point} while points_to_consider: point = points_to_consider.pop() if heightmap[point] ==", "basins def solve_part2(heightmap: Heightmap, low_points: set[Point]) -> int: \"\"\" Calculates", "{ (x - 1, y), (x, y - 1), (x,", "[] for low_point in low_points: basin: Basin = set() points_to_consider", "= set() points_to_consider = {low_point} while points_to_consider: point = points_to_consider.pop()", "return low_points def solve_part1(heightmap: Heightmap, low_points: set[Point]) -> int: \"\"\"", "def parse_input() -> Heightmap: \"\"\" Parses the input and returns", "in low_points: basin: Basin = set() points_to_consider = {low_point} while", "with open(INPUT_FILE) as f: heights = [[int(x) for x in", "\"\"\" basins = get_basins(heightmap, low_points) basin_sizes = sorted((len(basin) for basin", "return basin_sizes[0] * basin_sizes[1] * basin_sizes[2] if __name__ == \"__main__\":", "line.strip()] for line in f] heightmap: Heightmap = dict() for", "= get_surrounding_heights(heightmap, point) if all(heightmap[point] < height for height in", "\"__main__\": heightmap = parse_input() low_points = get_low_points(heightmap) part1 = solve_part1(heightmap,", "height) in enumerate(row): heightmap[(x, y)] = height return heightmap def", "= parse_input() low_points = get_low_points(heightmap) part1 = solve_part1(heightmap, low_points) part2", "= sorted((len(basin) for basin in basins), reverse=True) return basin_sizes[0] *", 
"set[Point]: \"\"\" Finds the low points on the heightmap \"\"\"", "* basin_sizes[1] * basin_sizes[2] if __name__ == \"__main__\": heightmap =", "while points_to_consider: point = points_to_consider.pop() if heightmap[point] == 9: continue", "if heightmap[point] == 9: continue surrounding_points = get_surrounding_points(heightmap, point) points_to_consider.update(surrounding_points", "for (y, row) in enumerate(heights): for (x, height) in enumerate(row):", "int: \"\"\" Calculates the product of the sizes of the", "low_points: set[Point]) -> list[Basin]: \"\"\" Finds all basins on the", "dict[Point, int] Basin = set[Point] def parse_input() -> Heightmap: \"\"\"", "basins on the heightmap \"\"\" basins: list[Basin] = [] for", "return basins def solve_part2(heightmap: Heightmap, low_points: set[Point]) -> int: \"\"\"", "x, y = point return { (x - 1, y),", "part1 = solve_part1(heightmap, low_points) part2 = solve_part2(heightmap, low_points) print(part1) print(part2)", "y), (x, y - 1), (x, y + 1), (x", "for point in low_points) def get_basins(heightmap: Heightmap, low_points: set[Point]) ->", "basin_sizes = sorted((len(basin) for basin in basins), reverse=True) return basin_sizes[0]", "point in low_points) def get_basins(heightmap: Heightmap, low_points: set[Point]) -> list[Basin]:", "height return heightmap def get_surrounding_points(heightmap: Heightmap, point: Point) -> set[Point]:", "1), (x + 1, y), } & heightmap.keys() def get_surrounding_heights(heightmap:", "surrounding_points} def get_low_points(heightmap: Heightmap) -> set[Point]: \"\"\" Finds the low", "the input and returns a Heightmap \"\"\" with open(INPUT_FILE) as", "\"\"\" Calculates the product of the sizes of the three", "(y, row) in enumerate(heights): for (x, height) in enumerate(row): heightmap[(x,", "of surrounding points within the heightmap \"\"\" x, y =", "levels of all low points \"\"\" return sum(1 + heightmap[point]", "9: continue surrounding_points = get_surrounding_points(heightmap, point) points_to_consider.update(surrounding_points - basin) basin.add(point)", "\"\"\" Returns a set of surrounding points within the heightmap", "for point in heightmap: surrounding_heights = get_surrounding_heights(heightmap, point) if all(heightmap[point]", "set[int]: \"\"\" Returns the heights of points surrounding the given", "Basin = set[Point] def parse_input() -> Heightmap: \"\"\" Parses the", "height in surrounding_heights): low_points.add(point) return low_points def solve_part1(heightmap: Heightmap, low_points:", "\"\"\" Calculates the sum of the risk levels of all", "f: heights = [[int(x) for x in line.strip()] for line", "def solve_part2(heightmap: Heightmap, low_points: set[Point]) -> int: \"\"\" Calculates the", "(x - 1, y), (x, y - 1), (x, y", "point: Point) -> set[Point]: \"\"\" Returns a set of surrounding", "y), } & heightmap.keys() def get_surrounding_heights(heightmap: Heightmap, point: Point) ->", "set() points_to_consider = {low_point} while points_to_consider: point = points_to_consider.pop() if", "return {heightmap[point] for point in surrounding_points} def get_low_points(heightmap: Heightmap) ->", "heightmap.keys() def get_surrounding_heights(heightmap: Heightmap, point: Point) -> set[int]: \"\"\" Returns", "the heightmap \"\"\" basins: list[Basin] = [] for low_point in", "Calculates the sum of the risk levels of all low", "1), (x, y + 1), (x + 1, y), }", "heightmap[point] for point in low_points) def get_basins(heightmap: Heightmap, low_points: set[Point])", "low_points def solve_part1(heightmap: Heightmap, 
low_points: set[Point]) -> int: \"\"\" Calculates", "points surrounding the given point \"\"\" surrounding_points = get_surrounding_points(heightmap, point)", "point return { (x - 1, y), (x, y -", "1, y), } & heightmap.keys() def get_surrounding_heights(heightmap: Heightmap, point: Point)", "low_points = get_low_points(heightmap) part1 = solve_part1(heightmap, low_points) part2 = solve_part2(heightmap,", "(x, height) in enumerate(row): heightmap[(x, y)] = height return heightmap", "__name__ == \"__main__\": heightmap = parse_input() low_points = get_low_points(heightmap) part1", "heightmap \"\"\" low_points: set[Point] = set() for point in heightmap:", "continue surrounding_points = get_surrounding_points(heightmap, point) points_to_consider.update(surrounding_points - basin) basin.add(point) basins.append(basin)", "low_points: set[Point]) -> int: \"\"\" Calculates the product of the", "reverse=True) return basin_sizes[0] * basin_sizes[1] * basin_sizes[2] if __name__ ==", "if __name__ == \"__main__\": heightmap = parse_input() low_points = get_low_points(heightmap)", "Heightmap, low_points: set[Point]) -> int: \"\"\" Calculates the product of", "\"\"\" with open(INPUT_FILE) as f: heights = [[int(x) for x", "get_surrounding_heights(heightmap: Heightmap, point: Point) -> set[int]: \"\"\" Returns the heights", "points within the heightmap \"\"\" x, y = point return", "{low_point} while points_to_consider: point = points_to_consider.pop() if heightmap[point] == 9:", "(x, y + 1), (x + 1, y), } &", "line in f] heightmap: Heightmap = dict() for (y, row)", "heightmap[point] == 9: continue surrounding_points = get_surrounding_points(heightmap, point) points_to_consider.update(surrounding_points -", "largest basins \"\"\" basins = get_basins(heightmap, low_points) basin_sizes = sorted((len(basin)", "get_surrounding_points(heightmap, point) points_to_consider.update(surrounding_points - basin) basin.add(point) basins.append(basin) return basins def", "-> set[Point]: \"\"\" Finds the low points on the heightmap", "== 9: continue surrounding_points = get_surrounding_points(heightmap, point) points_to_consider.update(surrounding_points - basin)", "y + 1), (x + 1, y), } & heightmap.keys()", "Heightmap = dict() for (y, row) in enumerate(heights): for (x,", "points \"\"\" return sum(1 + heightmap[point] for point in low_points)", "solve_part2(heightmap: Heightmap, low_points: set[Point]) -> int: \"\"\" Calculates the product", "as f: heights = [[int(x) for x in line.strip()] for", "point: Point) -> set[int]: \"\"\" Returns the heights of points", "-> set[Point]: \"\"\" Returns a set of surrounding points within", "(x, y - 1), (x, y + 1), (x +", "+ 1, y), } & heightmap.keys() def get_surrounding_heights(heightmap: Heightmap, point:", "the heightmap \"\"\" x, y = point return { (x", "point = points_to_consider.pop() if heightmap[point] == 9: continue surrounding_points =", "Calculates the product of the sizes of the three largest", "sizes of the three largest basins \"\"\" basins = get_basins(heightmap,", "Heightmap: \"\"\" Parses the input and returns a Heightmap \"\"\"", "- 1, y), (x, y - 1), (x, y +", "Heightmap, point: Point) -> set[int]: \"\"\" Returns the heights of", "(x + 1, y), } & heightmap.keys() def get_surrounding_heights(heightmap: Heightmap,", "get_low_points(heightmap: Heightmap) -> set[Point]: \"\"\" Finds the low points on", "get_surrounding_heights(heightmap, point) if all(heightmap[point] < height for height in surrounding_heights):", "basins.append(basin) return basins def 
solve_part2(heightmap: Heightmap, low_points: set[Point]) -> int:", "* basin_sizes[2] if __name__ == \"__main__\": heightmap = parse_input() low_points", "the risk levels of all low points \"\"\" return sum(1", "enumerate(heights): for (x, height) in enumerate(row): heightmap[(x, y)] = height", "heightmap \"\"\" basins: list[Basin] = [] for low_point in low_points:", "= dict() for (y, row) in enumerate(heights): for (x, height)", "set[Point] = set() for point in heightmap: surrounding_heights = get_surrounding_heights(heightmap,", "basins \"\"\" basins = get_basins(heightmap, low_points) basin_sizes = sorted((len(basin) for", "basins = get_basins(heightmap, low_points) basin_sizes = sorted((len(basin) for basin in", "y = point return { (x - 1, y), (x,", "all basins on the heightmap \"\"\" basins: list[Basin] = []", "get_low_points(heightmap) part1 = solve_part1(heightmap, low_points) part2 = solve_part2(heightmap, low_points) print(part1)", "in surrounding_heights): low_points.add(point) return low_points def solve_part1(heightmap: Heightmap, low_points: set[Point])", "-> int: \"\"\" Calculates the sum of the risk levels", "for basin in basins), reverse=True) return basin_sizes[0] * basin_sizes[1] *", "solve_part1(heightmap: Heightmap, low_points: set[Point]) -> int: \"\"\" Calculates the sum", "heightmap: surrounding_heights = get_surrounding_heights(heightmap, point) if all(heightmap[point] < height for", "and returns a Heightmap \"\"\" with open(INPUT_FILE) as f: heights", "Heightmap, low_points: set[Point]) -> list[Basin]: \"\"\" Finds all basins on", "of points surrounding the given point \"\"\" surrounding_points = get_surrounding_points(heightmap,", "the sizes of the three largest basins \"\"\" basins =", "get_basins(heightmap, low_points) basin_sizes = sorted((len(basin) for basin in basins), reverse=True)", "in line.strip()] for line in f] heightmap: Heightmap = dict()", "dict() for (y, row) in enumerate(heights): for (x, height) in", "within the heightmap \"\"\" x, y = point return {", "for low_point in low_points: basin: Basin = set() points_to_consider =", "surrounding_heights): low_points.add(point) return low_points def solve_part1(heightmap: Heightmap, low_points: set[Point]) ->", "get_surrounding_points(heightmap, point) return {heightmap[point] for point in surrounding_points} def get_low_points(heightmap:", "int] Heightmap = dict[Point, int] Basin = set[Point] def parse_input()", "\"\"\" Parses the input and returns a Heightmap \"\"\" with", "the heightmap \"\"\" low_points: set[Point] = set() for point in", "all(heightmap[point] < height for height in surrounding_heights): low_points.add(point) return low_points", "\"\"\" low_points: set[Point] = set() for point in heightmap: surrounding_heights", "low_points: set[Point]) -> int: \"\"\" Calculates the sum of the", "int: \"\"\" Calculates the sum of the risk levels of", "list[Basin] = [] for low_point in low_points: basin: Basin =", "set[Point]) -> int: \"\"\" Calculates the product of the sizes", "product of the sizes of the three largest basins \"\"\"", "heightmap[(x, y)] = height return heightmap def get_surrounding_points(heightmap: Heightmap, point:", "points on the heightmap \"\"\" low_points: set[Point] = set() for", "in f] heightmap: Heightmap = dict() for (y, row) in", "for (x, height) in enumerate(row): heightmap[(x, y)] = height return", "surrounding points within the heightmap \"\"\" x, y = point", "Heightmap \"\"\" with open(INPUT_FILE) as f: heights = [[int(x) for", "Point) -> set[int]: \"\"\" Returns the 
heights of points surrounding", "of the risk levels of all low points \"\"\" return", "in low_points) def get_basins(heightmap: Heightmap, low_points: set[Point]) -> list[Basin]: \"\"\"", "= get_low_points(heightmap) part1 = solve_part1(heightmap, low_points) part2 = solve_part2(heightmap, low_points)", "int] Basin = set[Point] def parse_input() -> Heightmap: \"\"\" Parses", "for height in surrounding_heights): low_points.add(point) return low_points def solve_part1(heightmap: Heightmap,", "returns a Heightmap \"\"\" with open(INPUT_FILE) as f: heights =", "+ 1), (x + 1, y), } & heightmap.keys() def", "Point = tuple[int, int] Heightmap = dict[Point, int] Basin =", "low_points) def get_basins(heightmap: Heightmap, low_points: set[Point]) -> list[Basin]: \"\"\" Finds", "Finds the low points on the heightmap \"\"\" low_points: set[Point]", "for x in line.strip()] for line in f] heightmap: Heightmap", "for point in surrounding_points} def get_low_points(heightmap: Heightmap) -> set[Point]: \"\"\"", "def get_basins(heightmap: Heightmap, low_points: set[Point]) -> list[Basin]: \"\"\" Finds all", "basin_sizes[0] * basin_sizes[1] * basin_sizes[2] if __name__ == \"__main__\": heightmap", "\"\"\" Returns the heights of points surrounding the given point", "< height for height in surrounding_heights): low_points.add(point) return low_points def", "Heightmap, low_points: set[Point]) -> int: \"\"\" Calculates the sum of", "-> list[Basin]: \"\"\" Finds all basins on the heightmap \"\"\"", "[[int(x) for x in line.strip()] for line in f] heightmap:", "in enumerate(heights): for (x, height) in enumerate(row): heightmap[(x, y)] =", "Returns a set of surrounding points within the heightmap \"\"\"", "\"\"\" surrounding_points = get_surrounding_points(heightmap, point) return {heightmap[point] for point in", "{heightmap[point] for point in surrounding_points} def get_low_points(heightmap: Heightmap) -> set[Point]:", "row) in enumerate(heights): for (x, height) in enumerate(row): heightmap[(x, y)]", "low points on the heightmap \"\"\" low_points: set[Point] = set()", "risk levels of all low points \"\"\" return sum(1 +", "on the heightmap \"\"\" basins: list[Basin] = [] for low_point", "basins: list[Basin] = [] for low_point in low_points: basin: Basin", "= [] for low_point in low_points: basin: Basin = set()", "-> Heightmap: \"\"\" Parses the input and returns a Heightmap", "= points_to_consider.pop() if heightmap[point] == 9: continue surrounding_points = get_surrounding_points(heightmap,", "INPUT_FILE = \"../../input/09.txt\" Point = tuple[int, int] Heightmap = dict[Point,", "heights = [[int(x) for x in line.strip()] for line in", "Heightmap, point: Point) -> set[Point]: \"\"\" Returns a set of", "open(INPUT_FILE) as f: heights = [[int(x) for x in line.strip()]", "\"../../input/09.txt\" Point = tuple[int, int] Heightmap = dict[Point, int] Basin", "\"\"\" x, y = point return { (x - 1,", "basin_sizes[1] * basin_sizes[2] if __name__ == \"__main__\": heightmap = parse_input()", "= height return heightmap def get_surrounding_points(heightmap: Heightmap, point: Point) ->", "Heightmap) -> set[Point]: \"\"\" Finds the low points on the", "set of surrounding points within the heightmap \"\"\" x, y", "= dict[Point, int] Basin = set[Point] def parse_input() -> Heightmap:", "\"\"\" basins: list[Basin] = [] for low_point in low_points: basin:", "Heightmap = dict[Point, int] Basin = set[Point] def parse_input() ->", "low points \"\"\" return sum(1 + heightmap[point] for point in", "sorted((len(basin) for basin in basins), 
reverse=True) return basin_sizes[0] * basin_sizes[1]", "sum(1 + heightmap[point] for point in low_points) def get_basins(heightmap: Heightmap,", "parse_input() low_points = get_low_points(heightmap) part1 = solve_part1(heightmap, low_points) part2 =", "+ heightmap[point] for point in low_points) def get_basins(heightmap: Heightmap, low_points:", "point) points_to_consider.update(surrounding_points - basin) basin.add(point) basins.append(basin) return basins def solve_part2(heightmap:", "all low points \"\"\" return sum(1 + heightmap[point] for point", "basin: Basin = set() points_to_consider = {low_point} while points_to_consider: point", "in enumerate(row): heightmap[(x, y)] = height return heightmap def get_surrounding_points(heightmap:", "point in heightmap: surrounding_heights = get_surrounding_heights(heightmap, point) if all(heightmap[point] <", "the product of the sizes of the three largest basins", "= \"../../input/09.txt\" Point = tuple[int, int] Heightmap = dict[Point, int]", "1, y), (x, y - 1), (x, y + 1),", "y - 1), (x, y + 1), (x + 1,", "the heights of points surrounding the given point \"\"\" surrounding_points", "in heightmap: surrounding_heights = get_surrounding_heights(heightmap, point) if all(heightmap[point] < height", "== \"__main__\": heightmap = parse_input() low_points = get_low_points(heightmap) part1 =", "get_basins(heightmap: Heightmap, low_points: set[Point]) -> list[Basin]: \"\"\" Finds all basins", "def get_surrounding_heights(heightmap: Heightmap, point: Point) -> set[int]: \"\"\" Returns the", "set[Point] def parse_input() -> Heightmap: \"\"\" Parses the input and", "Basin = set() points_to_consider = {low_point} while points_to_consider: point =", "} & heightmap.keys() def get_surrounding_heights(heightmap: Heightmap, point: Point) -> set[int]:", "input and returns a Heightmap \"\"\" with open(INPUT_FILE) as f:", "x in line.strip()] for line in f] heightmap: Heightmap =", "heights of points surrounding the given point \"\"\" surrounding_points =", "Point) -> set[Point]: \"\"\" Returns a set of surrounding points", "def get_low_points(heightmap: Heightmap) -> set[Point]: \"\"\" Finds the low points", "basin_sizes[2] if __name__ == \"__main__\": heightmap = parse_input() low_points =", "point in surrounding_points} def get_low_points(heightmap: Heightmap) -> set[Point]: \"\"\" Finds", "\"\"\" Finds all basins on the heightmap \"\"\" basins: list[Basin]", "def solve_part1(heightmap: Heightmap, low_points: set[Point]) -> int: \"\"\" Calculates the", "tuple[int, int] Heightmap = dict[Point, int] Basin = set[Point] def", "surrounding the given point \"\"\" surrounding_points = get_surrounding_points(heightmap, point) return", "points_to_consider.pop() if heightmap[point] == 9: continue surrounding_points = get_surrounding_points(heightmap, point)", "point) return {heightmap[point] for point in surrounding_points} def get_low_points(heightmap: Heightmap)", "a set of surrounding points within the heightmap \"\"\" x,", "return sum(1 + heightmap[point] for point in low_points) def get_basins(heightmap:", "point \"\"\" surrounding_points = get_surrounding_points(heightmap, point) return {heightmap[point] for point", "basin) basin.add(point) basins.append(basin) return basins def solve_part2(heightmap: Heightmap, low_points: set[Point])", "points_to_consider.update(surrounding_points - basin) basin.add(point) basins.append(basin) return basins def solve_part2(heightmap: Heightmap," ]
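# Self-check sketch (an addition, not part of the original solution; call it
# manually): the 5x10 grid and the expected answers 15 and 1134 come from the
# worked example in the AoC 2021 day 9 puzzle statement.
def _check_against_sample() -> None:
    sample = [
        "2199943210",
        "3987894921",
        "9856789892",
        "8767896789",
        "9899965678",
    ]
    heightmap: Heightmap = {
        (x, y): int(height)
        for y, row in enumerate(sample)
        for x, height in enumerate(row)
    }
    low_points = get_low_points(heightmap)
    assert solve_part1(heightmap, low_points) == 15    # risk levels 2+1+6+6
    assert solve_part2(heightmap, low_points) == 1134  # basin sizes 14*9*9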
[ "via KeyboardInterrupt\") def quit(self): self.log.info(\"Quitting MainLoop\") self.mainloop.quit() def main(): docolor", "level = logging.DEBUG elif Args.verbose == 1: level = logging.INFO", "def __init__(self): self.log = logging.getLogger(\"Voctoconfig\") self.log.debug(\"Creating GObject Mainloop\") self.mainloop =", "Args from lib.loghandler import LogHandler import lib.connection as Connection def", "level = logging.WARNING logging.root.setLevel(level) logging.debug('setting SIGINT handler') signal.signal(signal.SIGINT, signal.SIG_DFL) Connection.establish(Args.host)", "while True: logging.debug(\"mimimi...\") Connection.send(\"message\", \"test2\") time.sleep(10) if __name__ == '__main__':", "main(): docolor = (Args.color == 'always') or (Args.color == 'auto'", "== 'auto' and sys.stderr.isatty()) loghandler = LogHandler(docolor, Args.timestamp) logging.root.addHandler(loghandler) if", "signal.signal(signal.SIGINT, signal.SIG_DFL) Connection.establish(Args.host) Connection.enterNonblockingMode() Connection.on(\"message\", testCallback) mainloop = GObject.MainLoop() mainloop.run()", "GObject.MainLoop() mainloop.run() while True: logging.debug(\"mimimi...\") Connection.send(\"message\", \"test2\") time.sleep(10) if __name__", "Mainloop\") self.mainloop = GObject.MainLoop() def run(self): self.log.info(\"Running MainLoop\") try: self.mainloop.run()", "KeyboardInterrupt: self.log.info(\"Terminated via KeyboardInterrupt\") def quit(self): self.log.info(\"Quitting MainLoop\") self.mainloop.quit() def", "except KeyboardInterrupt: self.log.info(\"Terminated via KeyboardInterrupt\") def quit(self): self.log.info(\"Quitting MainLoop\") self.mainloop.quit()", "signal import logging import sys from gi.repository import GObject GObject.threads_init()", "self.log = logging.getLogger(\"Voctoconfig\") self.log.debug(\"Creating GObject Mainloop\") self.mainloop = GObject.MainLoop() def", "run(self): self.log.info(\"Running MainLoop\") try: self.mainloop.run() except KeyboardInterrupt: self.log.info(\"Terminated via KeyboardInterrupt\")", "SIGINT handler') signal.signal(signal.SIGINT, signal.SIG_DFL) Connection.establish(Args.host) Connection.enterNonblockingMode() Connection.on(\"message\", testCallback) mainloop =", "self.mainloop.quit() def main(): docolor = (Args.color == 'always') or (Args.color", "= LogHandler(docolor, Args.timestamp) logging.root.addHandler(loghandler) if Args.verbose >= 2: level =", "lib.loghandler import LogHandler import lib.connection as Connection def testCallback(args): log", "== 1: level = logging.INFO else: level = logging.WARNING logging.root.setLevel(level)", "= logging.getLogger(\"Test\") log.info(str(args)) class Voctoconfig(object): def __init__(self): self.log = logging.getLogger(\"Voctoconfig\")", "= logging.DEBUG elif Args.verbose == 1: level = logging.INFO else:", "2: level = logging.DEBUG elif Args.verbose == 1: level =", "from lib.args import Args from lib.loghandler import LogHandler import lib.connection", "from gi.repository import GObject GObject.threads_init() import time from lib.args import", "def run(self): self.log.info(\"Running MainLoop\") try: self.mainloop.run() except KeyboardInterrupt: self.log.info(\"Terminated via", "import lib.connection as Connection def testCallback(args): log = logging.getLogger(\"Test\") log.info(str(args))", "if Args.verbose >= 2: level = logging.DEBUG elif Args.verbose ==", "sys.stderr.isatty()) loghandler = LogHandler(docolor, Args.timestamp) logging.root.addHandler(loghandler) if Args.verbose >= 2:", "import time 
from lib.args import Args from lib.loghandler import LogHandler", "MainLoop\") self.mainloop.quit() def main(): docolor = (Args.color == 'always') or", "= logging.getLogger(\"Voctoconfig\") self.log.debug(\"Creating GObject Mainloop\") self.mainloop = GObject.MainLoop() def run(self):", "logging.getLogger(\"Test\") log.info(str(args)) class Voctoconfig(object): def __init__(self): self.log = logging.getLogger(\"Voctoconfig\") self.log.debug(\"Creating", "or (Args.color == 'auto' and sys.stderr.isatty()) loghandler = LogHandler(docolor, Args.timestamp)", "logging import sys from gi.repository import GObject GObject.threads_init() import time", "self.log.info(\"Running MainLoop\") try: self.mainloop.run() except KeyboardInterrupt: self.log.info(\"Terminated via KeyboardInterrupt\") def", "GObject.MainLoop() def run(self): self.log.info(\"Running MainLoop\") try: self.mainloop.run() except KeyboardInterrupt: self.log.info(\"Terminated", "GObject Mainloop\") self.mainloop = GObject.MainLoop() def run(self): self.log.info(\"Running MainLoop\") try:", "lib.connection as Connection def testCallback(args): log = logging.getLogger(\"Test\") log.info(str(args)) class", "class Voctoconfig(object): def __init__(self): self.log = logging.getLogger(\"Voctoconfig\") self.log.debug(\"Creating GObject Mainloop\")", "signal.SIG_DFL) Connection.establish(Args.host) Connection.enterNonblockingMode() Connection.on(\"message\", testCallback) mainloop = GObject.MainLoop() mainloop.run() while", "import Args from lib.loghandler import LogHandler import lib.connection as Connection", "logging.DEBUG elif Args.verbose == 1: level = logging.INFO else: level", "import logging import sys from gi.repository import GObject GObject.threads_init() import", "= logging.WARNING logging.root.setLevel(level) logging.debug('setting SIGINT handler') signal.signal(signal.SIGINT, signal.SIG_DFL) Connection.establish(Args.host) Connection.enterNonblockingMode()", "testCallback(args): log = logging.getLogger(\"Test\") log.info(str(args)) class Voctoconfig(object): def __init__(self): self.log", "log = logging.getLogger(\"Test\") log.info(str(args)) class Voctoconfig(object): def __init__(self): self.log =", "1: level = logging.INFO else: level = logging.WARNING logging.root.setLevel(level) logging.debug('setting", "from lib.loghandler import LogHandler import lib.connection as Connection def testCallback(args):", "python3 import signal import logging import sys from gi.repository import", "logging.INFO else: level = logging.WARNING logging.root.setLevel(level) logging.debug('setting SIGINT handler') signal.signal(signal.SIGINT,", "import GObject GObject.threads_init() import time from lib.args import Args from", "logging.debug('setting SIGINT handler') signal.signal(signal.SIGINT, signal.SIG_DFL) Connection.establish(Args.host) Connection.enterNonblockingMode() Connection.on(\"message\", testCallback) mainloop", "logging.getLogger(\"Voctoconfig\") self.log.debug(\"Creating GObject Mainloop\") self.mainloop = GObject.MainLoop() def run(self): self.log.info(\"Running", "== 'always') or (Args.color == 'auto' and sys.stderr.isatty()) loghandler =", "self.mainloop.run() except KeyboardInterrupt: self.log.info(\"Terminated via KeyboardInterrupt\") def quit(self): self.log.info(\"Quitting MainLoop\")", "def quit(self): self.log.info(\"Quitting MainLoop\") self.mainloop.quit() def main(): docolor = (Args.color", "def testCallback(args): log = logging.getLogger(\"Test\") log.info(str(args)) class Voctoconfig(object): def __init__(self):", 
"loghandler = LogHandler(docolor, Args.timestamp) logging.root.addHandler(loghandler) if Args.verbose >= 2: level", "(Args.color == 'always') or (Args.color == 'auto' and sys.stderr.isatty()) loghandler", "import LogHandler import lib.connection as Connection def testCallback(args): log =", "mainloop.run() while True: logging.debug(\"mimimi...\") Connection.send(\"message\", \"test2\") time.sleep(10) if __name__ ==", "KeyboardInterrupt\") def quit(self): self.log.info(\"Quitting MainLoop\") self.mainloop.quit() def main(): docolor =", "import sys from gi.repository import GObject GObject.threads_init() import time from", "self.log.debug(\"Creating GObject Mainloop\") self.mainloop = GObject.MainLoop() def run(self): self.log.info(\"Running MainLoop\")", "Connection.enterNonblockingMode() Connection.on(\"message\", testCallback) mainloop = GObject.MainLoop() mainloop.run() while True: logging.debug(\"mimimi...\")", "Args.verbose >= 2: level = logging.DEBUG elif Args.verbose == 1:", "Args.timestamp) logging.root.addHandler(loghandler) if Args.verbose >= 2: level = logging.DEBUG elif", "try: self.mainloop.run() except KeyboardInterrupt: self.log.info(\"Terminated via KeyboardInterrupt\") def quit(self): self.log.info(\"Quitting", "self.mainloop = GObject.MainLoop() def run(self): self.log.info(\"Running MainLoop\") try: self.mainloop.run() except", "Args.verbose == 1: level = logging.INFO else: level = logging.WARNING", "Connection.on(\"message\", testCallback) mainloop = GObject.MainLoop() mainloop.run() while True: logging.debug(\"mimimi...\") Connection.send(\"message\",", "handler') signal.signal(signal.SIGINT, signal.SIG_DFL) Connection.establish(Args.host) Connection.enterNonblockingMode() Connection.on(\"message\", testCallback) mainloop = GObject.MainLoop()", "MainLoop\") try: self.mainloop.run() except KeyboardInterrupt: self.log.info(\"Terminated via KeyboardInterrupt\") def quit(self):", "Connection.establish(Args.host) Connection.enterNonblockingMode() Connection.on(\"message\", testCallback) mainloop = GObject.MainLoop() mainloop.run() while True:", "as Connection def testCallback(args): log = logging.getLogger(\"Test\") log.info(str(args)) class Voctoconfig(object):", "= GObject.MainLoop() def run(self): self.log.info(\"Running MainLoop\") try: self.mainloop.run() except KeyboardInterrupt:", "(Args.color == 'auto' and sys.stderr.isatty()) loghandler = LogHandler(docolor, Args.timestamp) logging.root.addHandler(loghandler)", "mainloop = GObject.MainLoop() mainloop.run() while True: logging.debug(\"mimimi...\") Connection.send(\"message\", \"test2\") time.sleep(10)", "lib.args import Args from lib.loghandler import LogHandler import lib.connection as", "= logging.INFO else: level = logging.WARNING logging.root.setLevel(level) logging.debug('setting SIGINT handler')", "else: level = logging.WARNING logging.root.setLevel(level) logging.debug('setting SIGINT handler') signal.signal(signal.SIGINT, signal.SIG_DFL)", "Connection def testCallback(args): log = logging.getLogger(\"Test\") log.info(str(args)) class Voctoconfig(object): def", "<filename>playground.py #!/usr/bin/env python3 import signal import logging import sys from", "= GObject.MainLoop() mainloop.run() while True: logging.debug(\"mimimi...\") Connection.send(\"message\", \"test2\") time.sleep(10) if", "docolor = (Args.color == 'always') or (Args.color == 'auto' and", "logging.root.addHandler(loghandler) if Args.verbose >= 2: level = logging.DEBUG elif Args.verbose", "log.info(str(args)) class Voctoconfig(object): def 
__init__(self): self.log = logging.getLogger(\"Voctoconfig\") self.log.debug(\"Creating GObject", "__init__(self): self.log = logging.getLogger(\"Voctoconfig\") self.log.debug(\"Creating GObject Mainloop\") self.mainloop = GObject.MainLoop()", "self.log.info(\"Terminated via KeyboardInterrupt\") def quit(self): self.log.info(\"Quitting MainLoop\") self.mainloop.quit() def main():", "level = logging.INFO else: level = logging.WARNING logging.root.setLevel(level) logging.debug('setting SIGINT", "self.log.info(\"Quitting MainLoop\") self.mainloop.quit() def main(): docolor = (Args.color == 'always')", "GObject GObject.threads_init() import time from lib.args import Args from lib.loghandler", "'always') or (Args.color == 'auto' and sys.stderr.isatty()) loghandler = LogHandler(docolor,", "#!/usr/bin/env python3 import signal import logging import sys from gi.repository", "elif Args.verbose == 1: level = logging.INFO else: level =", "sys from gi.repository import GObject GObject.threads_init() import time from lib.args", "= (Args.color == 'always') or (Args.color == 'auto' and sys.stderr.isatty())", "logging.WARNING logging.root.setLevel(level) logging.debug('setting SIGINT handler') signal.signal(signal.SIGINT, signal.SIG_DFL) Connection.establish(Args.host) Connection.enterNonblockingMode() Connection.on(\"message\",", "quit(self): self.log.info(\"Quitting MainLoop\") self.mainloop.quit() def main(): docolor = (Args.color ==", "gi.repository import GObject GObject.threads_init() import time from lib.args import Args", "import signal import logging import sys from gi.repository import GObject", "def main(): docolor = (Args.color == 'always') or (Args.color ==", "logging.root.setLevel(level) logging.debug('setting SIGINT handler') signal.signal(signal.SIGINT, signal.SIG_DFL) Connection.establish(Args.host) Connection.enterNonblockingMode() Connection.on(\"message\", testCallback)", "LogHandler(docolor, Args.timestamp) logging.root.addHandler(loghandler) if Args.verbose >= 2: level = logging.DEBUG", "GObject.threads_init() import time from lib.args import Args from lib.loghandler import", "testCallback) mainloop = GObject.MainLoop() mainloop.run() while True: logging.debug(\"mimimi...\") Connection.send(\"message\", \"test2\")", "and sys.stderr.isatty()) loghandler = LogHandler(docolor, Args.timestamp) logging.root.addHandler(loghandler) if Args.verbose >=", ">= 2: level = logging.DEBUG elif Args.verbose == 1: level", "Voctoconfig(object): def __init__(self): self.log = logging.getLogger(\"Voctoconfig\") self.log.debug(\"Creating GObject Mainloop\") self.mainloop", "True: logging.debug(\"mimimi...\") Connection.send(\"message\", \"test2\") time.sleep(10) if __name__ == '__main__': main()", "LogHandler import lib.connection as Connection def testCallback(args): log = logging.getLogger(\"Test\")", "'auto' and sys.stderr.isatty()) loghandler = LogHandler(docolor, Args.timestamp) logging.root.addHandler(loghandler) if Args.verbose", "time from lib.args import Args from lib.loghandler import LogHandler import" ]
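# Runtime sketch (an addition, not part of the original file; call it
# manually): stop the Voctoconfig mainloop after a few seconds using
# PyGObject's timeout source; since quit() returns None (falsy), the timer
# fires only once.
def _demo_voctoconfig():
    vc = Voctoconfig()
    GObject.timeout_add_seconds(5, vc.quit)
    vc.run()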
[ "except KeyError: env_step = 0 return epoch, env_step, gradient_step class", "the writer to log data. :param int train_interval: the log", "its name to TensorboardLogger in #427. This class is for", "tianshou.utils.logger.base import LOG_DATA_TYPE, BaseLogger class TensorboardLogger(BaseLogger): \"\"\"A logger that relies", "\"save/gradient_step\", gradient_step, {\"save/gradient_step\": gradient_step} ) def restore_data(self) -> Tuple[int, int,", ") -> None: if save_checkpoint_fn and epoch - self.last_save_step >=", "the log interval in log_test_data(). Default to 1. :param int", "int = 1000, test_interval: int = 1, update_interval: int =", "= 1000, save_interval: int = 1, ) -> None: super().__init__(train_interval,", "epoch save_checkpoint_fn(epoch, env_step, gradient_step) self.write(\"save/epoch\", epoch, {\"save/epoch\": epoch}) self.write(\"save/env_step\", env_step,", "# offline trainer doesn't have env_step env_step = ea.scalars.Items(\"save/env_step\")[-1].step self.last_log_train_step", "from tianshou.utils.logger.base import LOG_DATA_TYPE, BaseLogger class TensorboardLogger(BaseLogger): \"\"\"A logger that", "-> Tuple[int, int, int]: ea = event_accumulator.EventAccumulator(self.writer.log_dir) ea.Reload() try: #", "gradient_step epoch = ea.scalars.Items(\"save/epoch\")[-1].step self.last_save_step = self.last_log_test_step = epoch gradient_step", "import event_accumulator from torch.utils.tensorboard import SummaryWriter from tianshou.utils.logger.base import LOG_DATA_TYPE,", "= ea.scalars.Items(\"save/epoch\")[-1].step self.last_save_step = self.last_log_test_step = epoch gradient_step = ea.scalars.Items(\"save/gradient_step\")[-1].step", "default to visualize \\ and log statistics. :param SummaryWriter writer:", "1. :param int update_interval: the log interval in log_update_data(). Default", "writer def write(self, step_type: str, step: int, data: LOG_DATA_TYPE) ->", "int, save_checkpoint_fn: Optional[Callable[[int, int, int], None]] = None, ) ->", "in save_data(). Default to 1 (save at the end of", "train_interval: int = 1000, test_interval: int = 1, update_interval: int", "save_data( self, epoch: int, env_step: int, gradient_step: int, save_checkpoint_fn: Optional[Callable[[int,", "interval in save_data(). Default to 1 (save at the end", "in #427. This class is for compatibility. \"\"\" def __init__(self,", "epoch). \"\"\" def __init__( self, writer: SummaryWriter, train_interval: int =", "env_step, gradient_step) self.write(\"save/epoch\", epoch, {\"save/epoch\": epoch}) self.write(\"save/env_step\", env_step, {\"save/env_step\": env_step})", "None]] = None, ) -> None: if save_checkpoint_fn and epoch", "have env_step env_step = ea.scalars.Items(\"save/env_step\")[-1].step self.last_log_train_step = env_step except KeyError:", "int save_interval: the save interval in save_data(). Default to 1", "\"\"\"BasicLogger has changed its name to TensorboardLogger in #427. This", "in log_train_data(). Default to 1000. 
:param int test_interval: the log", "self.last_save_step = -1 self.writer = writer def write(self, step_type: str,", "for k, v in data.items(): self.writer.add_scalar(k, v, global_step=step) def save_data(", "int], None]] = None, ) -> None: if save_checkpoint_fn and", ">= self.save_interval: self.last_save_step = epoch save_checkpoint_fn(epoch, env_step, gradient_step) self.write(\"save/epoch\", epoch,", "save_interval self.last_save_step = -1 self.writer = writer def write(self, step_type:", "# epoch / gradient_step epoch = ea.scalars.Items(\"save/epoch\")[-1].step self.last_save_step = self.last_log_test_step", "env_step, {\"save/env_step\": env_step}) self.write( \"save/gradient_step\", gradient_step, {\"save/gradient_step\": gradient_step} ) def", "gradient_step except KeyError: epoch, gradient_step = 0, 0 try: #", "gradient_step class BasicLogger(TensorboardLogger): \"\"\"BasicLogger has changed its name to TensorboardLogger", "*args: Any, **kwargs: Any) -> None: warnings.warn( \"Deprecated soon: BasicLogger", "def restore_data(self) -> Tuple[int, int, int]: ea = event_accumulator.EventAccumulator(self.writer.log_dir) ea.Reload()", "epoch}) self.write(\"save/env_step\", env_step, {\"save/env_step\": env_step}) self.write( \"save/gradient_step\", gradient_step, {\"save/gradient_step\": gradient_step}", "0 try: # offline trainer doesn't have env_step env_step =", "class BasicLogger(TensorboardLogger): \"\"\"BasicLogger has changed its name to TensorboardLogger in", "gradient_step = 0, 0 try: # offline trainer doesn't have", "in log_test_data(). Default to 1. :param int update_interval: the log", "1 (save at the end of each epoch). \"\"\" def", "None: if save_checkpoint_fn and epoch - self.last_save_step >= self.save_interval: self.last_save_step", "0, 0 try: # offline trainer doesn't have env_step env_step", ":param int update_interval: the log interval in log_update_data(). Default to", "int update_interval: the log interval in log_update_data(). Default to 1000.", "self.last_log_test_step = epoch gradient_step = ea.scalars.Items(\"save/gradient_step\")[-1].step self.last_log_update_step = gradient_step except", "\\ and log statistics. :param SummaryWriter writer: the writer to", "env_step = ea.scalars.Items(\"save/env_step\")[-1].step self.last_log_train_step = env_step except KeyError: env_step =", "= 1, update_interval: int = 1000, save_interval: int = 1,", "int, int], None]] = None, ) -> None: if save_checkpoint_fn", "that relies on tensorboard SummaryWriter by default to visualize \\", "from torch.utils.tensorboard import SummaryWriter from tianshou.utils.logger.base import LOG_DATA_TYPE, BaseLogger class", "is for compatibility. \"\"\" def __init__(self, *args: Any, **kwargs: Any)", "tensorboard SummaryWriter by default to visualize \\ and log statistics.", "data: LOG_DATA_TYPE) -> None: for k, v in data.items(): self.writer.add_scalar(k,", "from typing import Any, Callable, Optional, Tuple from tensorboard.backend.event_processing import", "changed its name to TensorboardLogger in #427. 
This class is", "update_interval: int = 1000, save_interval: int = 1, ) ->", "BasicLogger has renamed to TensorboardLogger in #427.\" ) super().__init__(*args, **kwargs)", "Any, Callable, Optional, Tuple from tensorboard.backend.event_processing import event_accumulator from torch.utils.tensorboard", "k, v in data.items(): self.writer.add_scalar(k, v, global_step=step) def save_data( self,", "relies on tensorboard SummaryWriter by default to visualize \\ and", "= None, ) -> None: if save_checkpoint_fn and epoch -", "= epoch gradient_step = ea.scalars.Items(\"save/gradient_step\")[-1].step self.last_log_update_step = gradient_step except KeyError:", "int, int]: ea = event_accumulator.EventAccumulator(self.writer.log_dir) ea.Reload() try: # epoch /", "import SummaryWriter from tianshou.utils.logger.base import LOG_DATA_TYPE, BaseLogger class TensorboardLogger(BaseLogger): \"\"\"A", "env_step, gradient_step class BasicLogger(TensorboardLogger): \"\"\"BasicLogger has changed its name to", "= writer def write(self, step_type: str, step: int, data: LOG_DATA_TYPE)", "ea.scalars.Items(\"save/env_step\")[-1].step self.last_log_train_step = env_step except KeyError: env_step = 0 return", "tensorboard.backend.event_processing import event_accumulator from torch.utils.tensorboard import SummaryWriter from tianshou.utils.logger.base import", "= -1 self.writer = writer def write(self, step_type: str, step:", "1000, test_interval: int = 1, update_interval: int = 1000, save_interval:", "save_checkpoint_fn and epoch - self.last_save_step >= self.save_interval: self.last_save_step = epoch", "update_interval: the log interval in log_update_data(). Default to 1000. :param", "epoch, {\"save/epoch\": epoch}) self.write(\"save/env_step\", env_step, {\"save/env_step\": env_step}) self.write( \"save/gradient_step\", gradient_step,", "self.write(\"save/env_step\", env_step, {\"save/env_step\": env_step}) self.write( \"save/gradient_step\", gradient_step, {\"save/gradient_step\": gradient_step} )", "\"\"\"A logger that relies on tensorboard SummaryWriter by default to", "to 1000. :param int test_interval: the log interval in log_test_data().", "1000, save_interval: int = 1, ) -> None: super().__init__(train_interval, test_interval,", "1, ) -> None: super().__init__(train_interval, test_interval, update_interval) self.save_interval = save_interval", "and log statistics. :param SummaryWriter writer: the writer to log", "interval in log_update_data(). Default to 1000. :param int save_interval: the", "int, gradient_step: int, save_checkpoint_fn: Optional[Callable[[int, int, int], None]] = None,", "self.last_save_step = epoch save_checkpoint_fn(epoch, env_step, gradient_step) self.write(\"save/epoch\", epoch, {\"save/epoch\": epoch})", "by default to visualize \\ and log statistics. :param SummaryWriter", "trainer doesn't have env_step env_step = ea.scalars.Items(\"save/env_step\")[-1].step self.last_log_train_step = env_step", "event_accumulator.EventAccumulator(self.writer.log_dir) ea.Reload() try: # epoch / gradient_step epoch = ea.scalars.Items(\"save/epoch\")[-1].step", "statistics. :param SummaryWriter writer: the writer to log data. 
:param", "env_step = 0 return epoch, env_step, gradient_step class BasicLogger(TensorboardLogger): \"\"\"BasicLogger", "self.save_interval = save_interval self.last_save_step = -1 self.writer = writer def", "class TensorboardLogger(BaseLogger): \"\"\"A logger that relies on tensorboard SummaryWriter by", "int, data: LOG_DATA_TYPE) -> None: for k, v in data.items():", "- self.last_save_step >= self.save_interval: self.last_save_step = epoch save_checkpoint_fn(epoch, env_step, gradient_step)", "= event_accumulator.EventAccumulator(self.writer.log_dir) ea.Reload() try: # epoch / gradient_step epoch =", "**kwargs: Any) -> None: warnings.warn( \"Deprecated soon: BasicLogger has renamed", "import warnings from typing import Any, Callable, Optional, Tuple from", "Any) -> None: warnings.warn( \"Deprecated soon: BasicLogger has renamed to", "from tensorboard.backend.event_processing import event_accumulator from torch.utils.tensorboard import SummaryWriter from tianshou.utils.logger.base", "epoch gradient_step = ea.scalars.Items(\"save/gradient_step\")[-1].step self.last_log_update_step = gradient_step except KeyError: epoch,", "BasicLogger(TensorboardLogger): \"\"\"BasicLogger has changed its name to TensorboardLogger in #427.", "global_step=step) def save_data( self, epoch: int, env_step: int, gradient_step: int,", "None: warnings.warn( \"Deprecated soon: BasicLogger has renamed to TensorboardLogger in", "save interval in save_data(). Default to 1 (save at the", "log data. :param int train_interval: the log interval in log_train_data().", "epoch: int, env_step: int, gradient_step: int, save_checkpoint_fn: Optional[Callable[[int, int, int],", "and epoch - self.last_save_step >= self.save_interval: self.last_save_step = epoch save_checkpoint_fn(epoch,", "gradient_step, {\"save/gradient_step\": gradient_step} ) def restore_data(self) -> Tuple[int, int, int]:", "gradient_step) self.write(\"save/epoch\", epoch, {\"save/epoch\": epoch}) self.write(\"save/env_step\", env_step, {\"save/env_step\": env_step}) self.write(", "= save_interval self.last_save_step = -1 self.writer = writer def write(self,", "soon: BasicLogger has renamed to TensorboardLogger in #427.\" ) super().__init__(*args,", "save_data(). Default to 1 (save at the end of each", ":param int train_interval: the log interval in log_train_data(). Default to", "log_update_data(). Default to 1000. :param int save_interval: the save interval", "SummaryWriter, train_interval: int = 1000, test_interval: int = 1, update_interval:", "env_step env_step = ea.scalars.Items(\"save/env_step\")[-1].step self.last_log_train_step = env_step except KeyError: env_step", "0 return epoch, env_step, gradient_step class BasicLogger(TensorboardLogger): \"\"\"BasicLogger has changed", "1000. :param int test_interval: the log interval in log_test_data(). Default", "log_test_data(). Default to 1. :param int update_interval: the log interval", "Default to 1 (save at the end of each epoch).", "self, epoch: int, env_step: int, gradient_step: int, save_checkpoint_fn: Optional[Callable[[int, int,", "data. :param int train_interval: the log interval in log_train_data(). Default", "of each epoch). 
\"\"\" def __init__( self, writer: SummaryWriter, train_interval:", "{\"save/epoch\": epoch}) self.write(\"save/env_step\", env_step, {\"save/env_step\": env_step}) self.write( \"save/gradient_step\", gradient_step, {\"save/gradient_step\":", "env_step except KeyError: env_step = 0 return epoch, env_step, gradient_step", "__init__(self, *args: Any, **kwargs: Any) -> None: warnings.warn( \"Deprecated soon:", "on tensorboard SummaryWriter by default to visualize \\ and log", "(save at the end of each epoch). \"\"\" def __init__(", "step_type: str, step: int, data: LOG_DATA_TYPE) -> None: for k,", "= gradient_step except KeyError: epoch, gradient_step = 0, 0 try:", "ea.scalars.Items(\"save/epoch\")[-1].step self.last_save_step = self.last_log_test_step = epoch gradient_step = ea.scalars.Items(\"save/gradient_step\")[-1].step self.last_log_update_step", "int = 1, update_interval: int = 1000, save_interval: int =", "super().__init__(train_interval, test_interval, update_interval) self.save_interval = save_interval self.last_save_step = -1 self.writer", "1, update_interval: int = 1000, save_interval: int = 1, )", "save_checkpoint_fn: Optional[Callable[[int, int, int], None]] = None, ) -> None:", "str, step: int, data: LOG_DATA_TYPE) -> None: for k, v", "#427. This class is for compatibility. \"\"\" def __init__(self, *args:", "BaseLogger class TensorboardLogger(BaseLogger): \"\"\"A logger that relies on tensorboard SummaryWriter", "\"\"\" def __init__( self, writer: SummaryWriter, train_interval: int = 1000,", "Callable, Optional, Tuple from tensorboard.backend.event_processing import event_accumulator from torch.utils.tensorboard import", "int = 1, ) -> None: super().__init__(train_interval, test_interval, update_interval) self.save_interval", "ea.Reload() try: # epoch / gradient_step epoch = ea.scalars.Items(\"save/epoch\")[-1].step self.last_save_step", "the log interval in log_train_data(). Default to 1000. :param int", "the save interval in save_data(). Default to 1 (save at", "int]: ea = event_accumulator.EventAccumulator(self.writer.log_dir) ea.Reload() try: # epoch / gradient_step", "self.last_save_step = self.last_log_test_step = epoch gradient_step = ea.scalars.Items(\"save/gradient_step\")[-1].step self.last_log_update_step =", "= 0 return epoch, env_step, gradient_step class BasicLogger(TensorboardLogger): \"\"\"BasicLogger has", "to TensorboardLogger in #427. This class is for compatibility. \"\"\"", "to 1. :param int update_interval: the log interval in log_update_data().", "write(self, step_type: str, step: int, data: LOG_DATA_TYPE) -> None: for", ":param int save_interval: the save interval in save_data(). Default to", "def write(self, step_type: str, step: int, data: LOG_DATA_TYPE) -> None:", "TensorboardLogger in #427. This class is for compatibility. \"\"\" def", "each epoch). \"\"\" def __init__( self, writer: SummaryWriter, train_interval: int", "log statistics. :param SummaryWriter writer: the writer to log data.", "= ea.scalars.Items(\"save/gradient_step\")[-1].step self.last_log_update_step = gradient_step except KeyError: epoch, gradient_step =", "{\"save/env_step\": env_step}) self.write( \"save/gradient_step\", gradient_step, {\"save/gradient_step\": gradient_step} ) def restore_data(self)", "gradient_step} ) def restore_data(self) -> Tuple[int, int, int]: ea =", "interval in log_test_data(). Default to 1. 
:param int update_interval: the", "logger that relies on tensorboard SummaryWriter by default to visualize", "return epoch, env_step, gradient_step class BasicLogger(TensorboardLogger): \"\"\"BasicLogger has changed its", "Any, **kwargs: Any) -> None: warnings.warn( \"Deprecated soon: BasicLogger has", "epoch, env_step, gradient_step class BasicLogger(TensorboardLogger): \"\"\"BasicLogger has changed its name", "def __init__( self, writer: SummaryWriter, train_interval: int = 1000, test_interval:", "save_interval: the save interval in save_data(). Default to 1 (save", "test_interval, update_interval) self.save_interval = save_interval self.last_save_step = -1 self.writer =", "update_interval) self.save_interval = save_interval self.last_save_step = -1 self.writer = writer", "-> None: super().__init__(train_interval, test_interval, update_interval) self.save_interval = save_interval self.last_save_step =", "if save_checkpoint_fn and epoch - self.last_save_step >= self.save_interval: self.last_save_step =", "except KeyError: epoch, gradient_step = 0, 0 try: # offline", "log_train_data(). Default to 1000. :param int test_interval: the log interval", "/ gradient_step epoch = ea.scalars.Items(\"save/epoch\")[-1].step self.last_save_step = self.last_log_test_step = epoch", "the log interval in log_update_data(). Default to 1000. :param int", "for compatibility. \"\"\" def __init__(self, *args: Any, **kwargs: Any) ->", "int, env_step: int, gradient_step: int, save_checkpoint_fn: Optional[Callable[[int, int, int], None]]", "typing import Any, Callable, Optional, Tuple from tensorboard.backend.event_processing import event_accumulator", "= 1, ) -> None: super().__init__(train_interval, test_interval, update_interval) self.save_interval =", "epoch / gradient_step epoch = ea.scalars.Items(\"save/epoch\")[-1].step self.last_save_step = self.last_log_test_step =", "test_interval: int = 1, update_interval: int = 1000, save_interval: int", "__init__( self, writer: SummaryWriter, train_interval: int = 1000, test_interval: int", "<reponame>Aceticia/tianshou<filename>tianshou/utils/logger/tensorboard.py import warnings from typing import Any, Callable, Optional, Tuple", "LOG_DATA_TYPE) -> None: for k, v in data.items(): self.writer.add_scalar(k, v,", "gradient_step = ea.scalars.Items(\"save/gradient_step\")[-1].step self.last_log_update_step = gradient_step except KeyError: epoch, gradient_step", "-1 self.writer = writer def write(self, step_type: str, step: int,", "env_step}) self.write( \"save/gradient_step\", gradient_step, {\"save/gradient_step\": gradient_step} ) def restore_data(self) ->", "TensorboardLogger(BaseLogger): \"\"\"A logger that relies on tensorboard SummaryWriter by default", "= ea.scalars.Items(\"save/env_step\")[-1].step self.last_log_train_step = env_step except KeyError: env_step = 0", "-> None: for k, v in data.items(): self.writer.add_scalar(k, v, global_step=step)", "self.writer.add_scalar(k, v, global_step=step) def save_data( self, epoch: int, env_step: int,", "Tuple from tensorboard.backend.event_processing import event_accumulator from torch.utils.tensorboard import SummaryWriter from", "to 1 (save at the end of each epoch). 
\"\"\"", "self.save_interval: self.last_save_step = epoch save_checkpoint_fn(epoch, env_step, gradient_step) self.write(\"save/epoch\", epoch, {\"save/epoch\":", "warnings.warn( \"Deprecated soon: BasicLogger has renamed to TensorboardLogger in #427.\"", "\"Deprecated soon: BasicLogger has renamed to TensorboardLogger in #427.\" )", ") def restore_data(self) -> Tuple[int, int, int]: ea = event_accumulator.EventAccumulator(self.writer.log_dir)", "self.write(\"save/epoch\", epoch, {\"save/epoch\": epoch}) self.write(\"save/env_step\", env_step, {\"save/env_step\": env_step}) self.write( \"save/gradient_step\",", ") -> None: super().__init__(train_interval, test_interval, update_interval) self.save_interval = save_interval self.last_save_step", "Optional, Tuple from tensorboard.backend.event_processing import event_accumulator from torch.utils.tensorboard import SummaryWriter", "to 1000. :param int save_interval: the save interval in save_data().", "1000. :param int save_interval: the save interval in save_data(). Default", "try: # epoch / gradient_step epoch = ea.scalars.Items(\"save/epoch\")[-1].step self.last_save_step =", "KeyError: env_step = 0 return epoch, env_step, gradient_step class BasicLogger(TensorboardLogger):", "None: super().__init__(train_interval, test_interval, update_interval) self.save_interval = save_interval self.last_save_step = -1", "offline trainer doesn't have env_step env_step = ea.scalars.Items(\"save/env_step\")[-1].step self.last_log_train_step =", "-> None: warnings.warn( \"Deprecated soon: BasicLogger has renamed to TensorboardLogger", "= self.last_log_test_step = epoch gradient_step = ea.scalars.Items(\"save/gradient_step\")[-1].step self.last_log_update_step = gradient_step", ":param int test_interval: the log interval in log_test_data(). Default to", "int = 1000, save_interval: int = 1, ) -> None:", "env_step: int, gradient_step: int, save_checkpoint_fn: Optional[Callable[[int, int, int], None]] =", "KeyError: epoch, gradient_step = 0, 0 try: # offline trainer", "interval in log_train_data(). Default to 1000. :param int test_interval: the", "name to TensorboardLogger in #427. This class is for compatibility.", "{\"save/gradient_step\": gradient_step} ) def restore_data(self) -> Tuple[int, int, int]: ea", ":param SummaryWriter writer: the writer to log data. :param int", "doesn't have env_step env_step = ea.scalars.Items(\"save/env_step\")[-1].step self.last_log_train_step = env_step except", "epoch = ea.scalars.Items(\"save/epoch\")[-1].step self.last_save_step = self.last_log_test_step = epoch gradient_step =", "warnings from typing import Any, Callable, Optional, Tuple from tensorboard.backend.event_processing", "step: int, data: LOG_DATA_TYPE) -> None: for k, v in", "SummaryWriter writer: the writer to log data. :param int train_interval:", "import Any, Callable, Optional, Tuple from tensorboard.backend.event_processing import event_accumulator from", "= 1000, test_interval: int = 1, update_interval: int = 1000,", "event_accumulator from torch.utils.tensorboard import SummaryWriter from tianshou.utils.logger.base import LOG_DATA_TYPE, BaseLogger", "SummaryWriter from tianshou.utils.logger.base import LOG_DATA_TYPE, BaseLogger class TensorboardLogger(BaseLogger): \"\"\"A logger", "at the end of each epoch). 
\"\"\" def __init__( self,", "= epoch save_checkpoint_fn(epoch, env_step, gradient_step) self.write(\"save/epoch\", epoch, {\"save/epoch\": epoch}) self.write(\"save/env_step\",", "ea.scalars.Items(\"save/gradient_step\")[-1].step self.last_log_update_step = gradient_step except KeyError: epoch, gradient_step = 0,", "train_interval: the log interval in log_train_data(). Default to 1000. :param", "This class is for compatibility. \"\"\" def __init__(self, *args: Any,", "self.last_save_step >= self.save_interval: self.last_save_step = epoch save_checkpoint_fn(epoch, env_step, gradient_step) self.write(\"save/epoch\",", "try: # offline trainer doesn't have env_step env_step = ea.scalars.Items(\"save/env_step\")[-1].step", "in data.items(): self.writer.add_scalar(k, v, global_step=step) def save_data( self, epoch: int,", "def __init__(self, *args: Any, **kwargs: Any) -> None: warnings.warn( \"Deprecated", "in log_update_data(). Default to 1000. :param int save_interval: the save", "to visualize \\ and log statistics. :param SummaryWriter writer: the", "None, ) -> None: if save_checkpoint_fn and epoch - self.last_save_step", "save_checkpoint_fn(epoch, env_step, gradient_step) self.write(\"save/epoch\", epoch, {\"save/epoch\": epoch}) self.write(\"save/env_step\", env_step, {\"save/env_step\":", "self.last_log_update_step = gradient_step except KeyError: epoch, gradient_step = 0, 0", "Default to 1000. :param int test_interval: the log interval in", "test_interval: the log interval in log_test_data(). Default to 1. :param", "epoch - self.last_save_step >= self.save_interval: self.last_save_step = epoch save_checkpoint_fn(epoch, env_step,", "= env_step except KeyError: env_step = 0 return epoch, env_step,", "self, writer: SummaryWriter, train_interval: int = 1000, test_interval: int =", "-> None: if save_checkpoint_fn and epoch - self.last_save_step >= self.save_interval:", "class is for compatibility. \"\"\" def __init__(self, *args: Any, **kwargs:", "the end of each epoch). \"\"\" def __init__( self, writer:", "int test_interval: the log interval in log_test_data(). Default to 1.", "self.write( \"save/gradient_step\", gradient_step, {\"save/gradient_step\": gradient_step} ) def restore_data(self) -> Tuple[int,", "writer: SummaryWriter, train_interval: int = 1000, test_interval: int = 1,", "None: for k, v in data.items(): self.writer.add_scalar(k, v, global_step=step) def", "epoch, gradient_step = 0, 0 try: # offline trainer doesn't", "self.last_log_train_step = env_step except KeyError: env_step = 0 return epoch,", "has changed its name to TensorboardLogger in #427. This class", "def save_data( self, epoch: int, env_step: int, gradient_step: int, save_checkpoint_fn:", "log interval in log_train_data(). Default to 1000. :param int test_interval:", "torch.utils.tensorboard import SummaryWriter from tianshou.utils.logger.base import LOG_DATA_TYPE, BaseLogger class TensorboardLogger(BaseLogger):", "to log data. :param int train_interval: the log interval in", "Default to 1000. :param int save_interval: the save interval in", "log interval in log_test_data(). Default to 1. :param int update_interval:", "end of each epoch). \"\"\" def __init__( self, writer: SummaryWriter,", "\"\"\" def __init__(self, *args: Any, **kwargs: Any) -> None: warnings.warn(", "Default to 1. :param int update_interval: the log interval in", "visualize \\ and log statistics. 
:param SummaryWriter writer: the writer", "v in data.items(): self.writer.add_scalar(k, v, global_step=step) def save_data( self, epoch:", "LOG_DATA_TYPE, BaseLogger class TensorboardLogger(BaseLogger): \"\"\"A logger that relies on tensorboard", "import LOG_DATA_TYPE, BaseLogger class TensorboardLogger(BaseLogger): \"\"\"A logger that relies on", "writer: the writer to log data. :param int train_interval: the", "SummaryWriter by default to visualize \\ and log statistics. :param", "int train_interval: the log interval in log_train_data(). Default to 1000.", "v, global_step=step) def save_data( self, epoch: int, env_step: int, gradient_step:", "Tuple[int, int, int]: ea = event_accumulator.EventAccumulator(self.writer.log_dir) ea.Reload() try: # epoch", "ea = event_accumulator.EventAccumulator(self.writer.log_dir) ea.Reload() try: # epoch / gradient_step epoch", "data.items(): self.writer.add_scalar(k, v, global_step=step) def save_data( self, epoch: int, env_step:", "= 0, 0 try: # offline trainer doesn't have env_step", "Optional[Callable[[int, int, int], None]] = None, ) -> None: if", "restore_data(self) -> Tuple[int, int, int]: ea = event_accumulator.EventAccumulator(self.writer.log_dir) ea.Reload() try:", "save_interval: int = 1, ) -> None: super().__init__(train_interval, test_interval, update_interval)", "gradient_step: int, save_checkpoint_fn: Optional[Callable[[int, int, int], None]] = None, )", "log interval in log_update_data(). Default to 1000. :param int save_interval:", "self.writer = writer def write(self, step_type: str, step: int, data:", "compatibility. \"\"\" def __init__(self, *args: Any, **kwargs: Any) -> None:", "writer to log data. :param int train_interval: the log interval" ]
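# --- Usage sketch (added; not part of the upstream file above). A minimal,
# hedged example of wiring TensorboardLogger to a SummaryWriter and resuming
# counters from the event file. The log directory and checkpoint payload are
# illustrative assumptions; only SummaryWriter, TensorboardLogger, save_data(),
# and restore_data() come from the code above.
if __name__ == "__main__":
    import torch

    writer = SummaryWriter("log/demo")  # assumed log dir
    logger = TensorboardLogger(writer, save_interval=1)

    def save_checkpoint_fn(epoch: int, env_step: int, gradient_step: int) -> None:
        # Assumed checkpoint payload/path; a real trainer saves policy state.
        torch.save({"epoch": epoch}, "log/demo/checkpoint.pth")

    # Writes the save/epoch, save/env_step, save/gradient_step scalars and
    # calls the checkpoint hook (epoch - last_save_step >= save_interval).
    logger.save_data(3, 30000, 3000, save_checkpoint_fn=save_checkpoint_fn)
    writer.flush()

    # On restart, restore_data() replays the last save/* scalars from the
    # event file so training counters resume where they left off.
    epoch, env_step, gradient_step = logger.restore_data()
    print(epoch, env_step, gradient_step)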
# Jetfuel Game Engine- A SDL-based 2D game-engine
# Copyright (C) 2018 InfernoStudios
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from ctypes import c_uint
from ctypes import c_int
from ctypes import c_void_p
from ctypes import c_bool
from ctypes import c_wchar_p

from jetfuel.draw.rectangleinterface import rectangle_interface
from jetfuel.draw.image import image


class menu(rectangle_interface):
    """ctypes wrapper around the native Menu drawable: each method first
    declares the argument/return types of the C symbol, then invokes it."""

    def __init__(self, jetfuelsoloader, maxheight=None, columngap=None,
                 buttongap=None):
        self._jetfuel = jetfuelsoloader.jetfuelso
        if maxheight is not None and columngap is not None \
                and buttongap is not None:
            self._jetfuel.Menu_new_from_heights_and_gaps.argtypes = [
                c_uint, c_uint, c_uint]
            self._jetfuel.Menu_new_from_heights_and_gaps.restype = c_void_p
            self.drawableref = self._jetfuel.Menu_new_from_heights_and_gaps(
                maxheight, columngap, buttongap)
        else:
            self._jetfuel.Menu_new.restype = c_void_p
            self.drawableref = self._jetfuel.Menu_new()
            print("Constructed empty drawableref!")

    def get_max_height(self):
        self._jetfuel.Menu_get_max_height.argtypes = [c_void_p]
        self._jetfuel.Menu_get_max_height.restype = c_uint
        return self._jetfuel.Menu_get_max_height(self.drawableref)

    def set_max_height(self, maxheight):
        self._jetfuel.Menu_set_max_height.argtypes = [c_void_p, c_uint]
        self._jetfuel.Menu_set_max_height(self.drawableref, maxheight)

    def get_column_gap(self):
        self._jetfuel.Menu_get_column_gap.argtypes = [c_void_p]
        self._jetfuel.Menu_get_column_gap.restype = c_uint
        return self._jetfuel.Menu_get_column_gap(self.drawableref)

    def set_column_gap(self, columngap):
        self._jetfuel.Menu_set_column_gap.argtypes = [c_void_p, c_uint]
        # Fixed: the original called Menu_set_column_height here, a symbol it
        # never configured; the argtypes line above names the intended call.
        self._jetfuel.Menu_set_column_gap(self.drawableref, columngap)

    def get_button_gap(self):
        self._jetfuel.Menu_get_button_gap.argtypes = [c_void_p]
        self._jetfuel.Menu_get_button_gap.restype = c_uint
        # Fixed: the original returned Menu_get_column_gap(...) despite
        # configuring Menu_get_button_gap above.
        return self._jetfuel.Menu_get_button_gap(self.drawableref)

    def set_button_gap(self, buttongap):
        # NOTE: the original binds Menu_set_max_height here and passes the
        # button gap to it, which looks like a copy-paste slip; the intended
        # C symbol name is not recoverable from this source, so the call is
        # left as written.
        self._jetfuel.Menu_set_max_height.argtypes = [c_void_p, c_uint]
        self._jetfuel.Menu_set_max_height(self.drawableref, buttongap)

    def get_container_box_image(self, jetfuelsoloader):
        self._jetfuel.Menu_get_container_box_image.argtypes = [c_void_p]
        self._jetfuel.Menu_get_container_box_image.restype = c_void_p
        # Wrap the returned native handle: build an empty image, free its own
        # handle, then adopt the menu's container box image.
        containerboximage = image(jetfuelsoloader)
        self._jetfuel.Image_delete.argtypes = [c_void_p]
        self._jetfuel.Image_delete(containerboximage.imageref)
        containerboximage.imageref = self._jetfuel.Menu_get_container_box_image(
            self.drawableref)
        return containerboximage

    def set_container_box_image(self, image, borderwidth, borderheight):
        self._jetfuel.Menu_set_container_box_image.argtypes = [
            c_void_p, c_void_p, c_uint, c_uint]
        # Fixed: the original dropped the menu handle and passed only three
        # of the four declared arguments.
        self._jetfuel.Menu_set_container_box_image(
            self.drawableref, image.imageref, borderwidth, borderheight)

    def get_container_box_border_width(self):
        self._jetfuel.Menu_get_container_box_border_width.argtypes = [c_void_p]
        self._jetfuel.Menu_get_container_box_border_width.restype = c_uint
        return self._jetfuel.Menu_get_container_box_border_width(
            self.drawableref)

    def get_container_box_border_height(self):
        self._jetfuel.Menu_get_container_box_border_height.argtypes = [c_void_p]
        self._jetfuel.Menu_get_container_box_border_height.restype = c_uint
        return self._jetfuel.Menu_get_container_box_border_height(
            self.drawableref)

    def add_button(self, buttoncharsreplacement, uisactiontowatchfor,
                   messagetosenduponclick, messagebus):
        self._jetfuel.Menu_add_button.argtypes = [
            c_void_p, c_void_p, c_wchar_p, c_wchar_p, c_void_p]
        self._jetfuel.Menu_add_button.restype = c_bool
        return self._jetfuel.Menu_add_button(
            self.drawableref, buttoncharsreplacement.buttoncharsref,
            uisactiontowatchfor, messagetosenduponclick,
            messagebus.messagebusref)

    def get_position_x(self):
        self._jetfuel.Menu_get_position_x.argtypes = [c_void_p]
        self._jetfuel.Menu_get_position_x.restype = c_int
        # Fixed: the original called self.Menu_get_position_x, which would
        # raise AttributeError; the symbol lives on the loaded library.
        return self._jetfuel.Menu_get_position_x(self.drawableref)

    def get_position_y(self):
        self._jetfuel.Menu_get_position_y.argtypes = [c_void_p]
        self._jetfuel.Menu_get_position_y.restype = c_int
        # Fixed: same self.Menu_* slip as get_position_x above.
        return self._jetfuel.Menu_get_position_y(self.drawableref)

    def set_position(self, x, y):
        self._jetfuel.Menu_set_position.argtypes = [c_void_p, c_int, c_int]
        self._jetfuel.Menu_set_position(self.drawableref, x, y)

    def get_rect_to_draw_width(self):
        self._jetfuel.Menu_get_rect_to_draw_width.argtypes = [c_void_p]
        self._jetfuel.Menu_get_rect_to_draw_width.restype = c_int
        # Fixed: same self.Menu_* slip as get_position_x above.
        return self._jetfuel.Menu_get_rect_to_draw_width(self.drawableref)

    def get_rect_to_draw_height(self):
        self._jetfuel.Menu_get_rect_to_draw_height.argtypes = [c_void_p]
        self._jetfuel.Menu_get_rect_to_draw_height.restype = c_int
        # Fixed: same self.Menu_* slip as get_position_x above.
        return self._jetfuel.Menu_get_rect_to_draw_height(self.drawableref)
# See the License for the", "c_uint; return self._jetfuel.Menu_get_column_gap(self.drawableref); def set_column_gap(self, columngap): self._jetfuel.Menu_set_column_gap.argtypes = [c_void_p, c_uint];", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "License. from ctypes import c_uint from ctypes import c_int from", "is not None and buttongap is not None): self._jetfuel.Menu_new_from_heights_and_gaps.argtypes =", "c_uint, c_uint]; self._jetfuel.Menu_new_from_heights_and_gaps.restype = c_void_p; self.drawableref = self._jetfuel.Menu_new_from_heights_and_gaps( maxheight, columngap,", "self._jetfuel.Menu_get_container_box_border_height.restype = c_uint; return self._jetfuel.Menu_get_container_box_border_height( self.drawableref); def add_button(self, buttoncharsreplacement, uisactiontowatchfor,", "[c_void_p]; self._jetfuel.Menu_get_container_box_border_width.restype = c_uint; return self._jetfuel.Menu_get_container_box_border_width( self.drawableref); def get_container_box_border_height(self): self._jetfuel.Menu_get_container_box_border_height.argtypes", "not None and columngap is not None and buttongap is", "c_uint; return self._jetfuel.Menu_get_max_height(self.drawableref); def set_max_height(self, maxheight): self._jetfuel.Menu_set_max_height.argtypes = [c_void_p, c_uint];", "self._jetfuel.Menu_set_max_height.argtypes = [c_void_p, c_uint]; self._jetfuel.Menu_set_max_height(self.drawableref, buttongap); def get_container_box_image(self, jetfuelsoloader): self._jetfuel.Menu_get_container_box_image.argtypes", "self._jetfuel.Image_delete.argtypes = [c_void_p]; self._jetfuel.Image_delete(containerboximage.imageref); containerboximage.imageref = self._jetfuel.Menu_get_container_box_image( self.drawableref); return containerboximage;", "c_uint, c_uint]; self._jetfuel.Menu_set_container_box_image(image.imageref, borderwidth, borderheight); def get_container_box_border_width(self): self._jetfuel.Menu_get_container_box_border_width.argtypes = [c_void_p];", "None and buttongap is not None): self._jetfuel.Menu_new_from_heights_and_gaps.argtypes = [c_uint, c_uint,", "import c_bool from ctypes import c_wchar_p from jetfuel.draw.rectangleinterface import rectangle_interface", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "self._jetfuel.Menu_get_button_gap.restype = c_uint; return self._jetfuel.Menu_get_column_gap(self.drawableref); def set_button_gap(self, buttongap): self._jetfuel.Menu_set_max_height.argtypes =", "self.Menu_get_position_y(self.drawableref); def set_position(self, x, y): self._jetfuel.Menu_set_position.argtypes = [c_void_p, c_int, c_int];", "buttongap): self._jetfuel.Menu_set_max_height.argtypes = [c_void_p, c_uint]; self._jetfuel.Menu_set_max_height(self.drawableref, buttongap); def get_container_box_image(self, jetfuelsoloader):", "def set_container_box_image(self, image, borderwidth, borderheight): self._jetfuel.Menu_set_container_box_image.argtypes = [c_void_p, c_void_p, c_uint,", "limitations under the License. 
from ctypes import c_uint from ctypes", "self._jetfuel.Menu_get_position_x.argtypes = [c_void_p]; self._jetfuel.Menu_get_position_x.restype = c_int; return self.Menu_get_position_x(self.drawableref); def get_position_y(self):", "jetfuelsoloader): self._jetfuel.Menu_get_container_box_image.argtypes = [c_void_p]; self._jetfuel.Menu_get_container_box_image.restype = c_void_p; containerboximage = image(jetfuelsoloader);", "\"License\"); # you may not use this file except in", "= [c_void_p]; self._jetfuel.Menu_get_button_gap.restype = c_uint; return self._jetfuel.Menu_get_column_gap(self.drawableref); def set_button_gap(self, buttongap):", "self._jetfuel.Menu_get_column_gap(self.drawableref); def set_button_gap(self, buttongap): self._jetfuel.Menu_set_max_height.argtypes = [c_void_p, c_uint]; self._jetfuel.Menu_set_max_height(self.drawableref, buttongap);", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "c_wchar_p from jetfuel.draw.rectangleinterface import rectangle_interface from jetfuel.draw.image import image class", "# distributed under the License is distributed on an \"AS", "Game Engine- A SDL-based 2D game-engine # Copyright (C) 2018", "# Unless required by applicable law or agreed to in", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "menu(rectangle_interface): def __init__(self, jetfuelsoloader, maxheight=None, columngap=None, buttongap=None): self._jetfuel = jetfuelsoloader.jetfuelso;", "from ctypes import c_uint from ctypes import c_int from ctypes", "c_uint; return self._jetfuel.Menu_get_column_gap(self.drawableref); def set_button_gap(self, buttongap): self._jetfuel.Menu_set_max_height.argtypes = [c_void_p, c_uint];", "You may obtain a copy of the License at #", "c_bool from ctypes import c_wchar_p from jetfuel.draw.rectangleinterface import rectangle_interface from", "set_container_box_image(self, image, borderwidth, borderheight): self._jetfuel.Menu_set_container_box_image.argtypes = [c_void_p, c_void_p, c_uint, c_uint];", "the Apache License, Version 2.0 (the \"License\"); # you may", "self.drawableref); return containerboximage; def set_container_box_image(self, image, borderwidth, borderheight): self._jetfuel.Menu_set_container_box_image.argtypes =", "set_column_gap(self, columngap): self._jetfuel.Menu_set_column_gap.argtypes = [c_void_p, c_uint]; self._jetfuel.Menu_set_column_height(self.drawableref, columngap); def get_button_gap(self):", "[c_void_p, c_uint]; self._jetfuel.Menu_set_column_height(self.drawableref, columngap); def get_button_gap(self): self._jetfuel.Menu_get_button_gap.argtypes = [c_void_p]; self._jetfuel.Menu_get_button_gap.restype", "columngap=None, buttongap=None): self._jetfuel = jetfuelsoloader.jetfuelso; if(maxheight is not None and" ]
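# A minimal usage sketch for the menu wrapper above. The jetfuelsoloader
# import path and the shared-object filename are assumptions for
# illustration, not confirmed engine API:
#
#   from jetfuel.jetfuelsoloader import jetfuelsoloader
#
#   loader = jetfuelsoloader('libjetfuel.so')
#   mainmenu = menu(loader, maxheight=600, columngap=20, buttongap=10)
#   mainmenu.set_position(100, 50)
#   background = image(loader)
#   mainmenu.set_container_box_image(background, 8, 8)
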
[ "x: jax.lax.psum(x, 'i'), 'i', devices=devices) def pre_pmap(xs): return jax.tree_map(lambda x:", "10, 20, 50]: t_inference_start = time.time() pred_acc = 0 pred_denominator", "jax_utils.unreplicate(optimizer), step) # Periodic metric handling. # Training Metrics if", "2.0 (the \"License\"); # you may not use this file", "warmup until warmup_steps, * rsqrt_decay: divide by square root of", "= pred_batch[0].shape[0] if cur_pred_batch_size % n_devices: padded_size = int( np.ceil(cur_pred_batch_size", "(io_shape[1:], io_shape[1:], program_shape[1:]) logging.info('padded_shapes: %s', padded_shapes) dataset = input_pipeline.create_dataset_from_tf_record( FLAGS.dataset_filepattern,", "id_char_table = {i+1: char for (i, char) in enumerate(dsl.CHARACTER)} char_id_table", "model checkpointed at step %d.', start_step) if FLAGS.finetune_start_step > 0:", "functools.partial(eval_step, eos_token=eos_token, config=eval_config), axis_name='batch') p_init_cache = jax.pmap( functools.partial( initialize_cache, max_decode_len=FLAGS.max_program_length,", "# Beam search metrics. if (step and step % FLAGS.predict_freq", "search. predict_ds = eval_ds.unbatch().padded_batch( int(np.ceil(batch_size / 10)), padded_shapes=padded_shapes) train_ds =", "Handle final odd-sized batch by padding instead of dropping it.", "(_, logits), grad = grad_fn(optimizer.target) grad = jax.lax.pmean(grad, 'batch') new_optimizer", "decoder model.\"\"\" # --> [batch * beam, 1, vocab] flat_logits", "steps between prediction (beam search).') flags.DEFINE_integer('checkpoint_freq', 50000, 'Number of steps", "/ eval_denominator, # pylint: disable=cell-var-from-loop eval_metrics_sums) if jax.host_id() == 0:", "batch.\"\"\" # Prepare transformer fast-decoder call for beam search: for", "for evaluation during training.\"\"\" weights = jnp.where( jnp.logical_and(programs > 0,", "training logs.') flags.DEFINE_integer('eval_freq', 2000, 'Number of steps between eval.') flags.DEFINE_integer('predict_freq',", "than tiled. 
flat_encoded = decode.flat_batch_beam_expand( models.DecomposeAttentionTransformer(config).apply( {'params': params}, inputs, outputs,", "programs) if not FLAGS.slow_decode else None) predicted = p_pred_step(optimizer.target, inputs,", "index, beam in enumerate(beams[:-5:-1]): try: decoded_program = decode_program(beam).to_string() except: #", "steps between training logs.') flags.DEFINE_integer('eval_freq', 2000, 'Number of steps between", "metrics_sums.pop('denominator') summary = jax.tree_map( lambda x: x / denominator, #", "supervised training tasks.\"\"\" # pytype: disable=wrong-arg-count # pytype: disable=attribute-error import", "the above-defined single-step decoder function, run a # beam search", "c_id > 0]) inps, outs = [], [] for inp,", "Main Train Loop # --------------------------------------------------------------------------- dropout_rng = jax.random.split(rng, jax.local_device_count()) del", "1.0 for name in factors: if name == 'constant': ret", "in s if c_id > 0]) inps, outs = [],", "weight_sum, } metrics = jax.lax.psum(metrics, 'batch') return metrics # Train", "string tokens.\"\"\" return ''.join([id_char_table[c_id] for c_id in s if c_id", "* linear_warmup: interpreted as linear warmup until warmup_steps, * rsqrt_decay:", "elif name == 'linear_warmup': ret *= jnp.minimum(1.0, step / warmup_steps)", "return jax.tree_map(lambda x: jnp.broadcast_to(x, (1,) + x.shape), xs) def post_pmap(xs):", "= jax.lax.pmean(grad, 'batch') new_optimizer = optimizer.apply_gradient(grad, learning_rate=lr) # Get metrics.", "training. eval_ds = dataset.take(FLAGS.num_eval_steps) # Decrease batch of predict dataset", "jnp.float32)) optimizer_def = optim.Adam( FLAGS.lr, beta1=0.9, beta2=0.98, eps=1e-9, weight_decay=FLAGS.weight_decay) optimizer", "decomposition_models as models from latent_programmer.decomposition_transformer_attention import input_pipeline from latent_programmer.tasks.robust_fill import", "permissions and # limitations under the License. # python3 \"\"\"Train", "= int( np.ceil(cur_pred_batch_size / n_devices) * n_devices) # pylint: disable=cell-var-from-loop", "1.0)))) else: raise ValueError('Unknown factor %s.' % name) return jnp.asarray(ret,", "programs != eos_token)), 1, 0).astype(jnp.float32) logits = models.DecomposeAttentionTransformer(config).apply( {'params': params},", "bos_special_attention=FLAGS.bos_special_attention) predict_config = models.DecomposeAttentionTransformerConfig( base_config=base_config.replace( shift=False, deterministic=not use_dropout, decode=not FLAGS.slow_decode),", "denominator, # pylint: disable=cell-var-from-loop metrics_sums) summary['learning_rate'] = lr # Calculate", "pytype: disable=attribute-error import collections import functools import json import os", "pred_denominator))) # Record beam search results as text summaries. message", "= (programs.shape[0], max_decode_len) dtype = config.base_config.dtype initial_variables = models.DecomposeAttentionTransformer(config).init( jax.random.PRNGKey(0),", "= jax.tree_map( lambda x: pad_examples(x, padded_size), pred_batch) inputs, outputs, programs", "inputs, outputs, programs) return compute_metrics(logits, programs, weights) def initialize_cache(inputs, outputs,", "dataset.padded_batch( batch_size, padded_shapes=padded_shapes, drop_remainder=True) # Split evaluation and training. eval_ds", "import json import os import random import sys import time", "model state. 
checkpoints.save_checkpoint( os.path.join(FLAGS.save_dir, 'checkpoints', hparam_str), jax_utils.unreplicate(optimizer), step) # Periodic", "# pylint: disable=bare-except predictions.append('Did not compile') logging.info('ios: %s', ios[-1]) logging.info('target:", "batch_size, padded_shapes=padded_shapes, drop_remainder=True) # Split evaluation and training. eval_ds =", "the starting constant for the lr schedule. factors: a string", "License for the specific language governing permissions and # limitations", "compute_metrics(logits, programs, weights) metrics['learning_rate'] = lr return new_optimizer, metrics, new_dropout_rng", "is expanded in-place # rather than tiled. flat_encoded = decode.flat_batch_beam_expand(", "%s.' % name) return jnp.asarray(ret, dtype=jnp.float32) return step_fn def compute_weighted_cross_entropy(logits,", "splitting inside the top pmap, rather # than handling it", "jnp.sqrt(warmup_steps) ret /= jnp.sqrt(jnp.maximum(step, warmup_steps)) elif name == 'decay_every': ret", "from latent_programmer import decode from latent_programmer import models as base_models", "> 0) or is_last_step: if jax.host_id() == 0: # Save", "checkpoint. optimizer = checkpoints.restore_checkpoint( os.path.join(FLAGS.save_dir, 'checkpoints', hparam_str), optimizer) # Grab", "# --------------------------------------------------------------------------- logging.info('Initializing dataset.') if not FLAGS.dataset_filepattern: raise ValueError('Must specify", "to handle a batch size equal to # batch_size *", "many steps to warm up for in the warmup schedule.", "alpha=0.6, bos_token=config.base_config.bos_token, eos_token=eos_token, max_decode_len=max_decode_len, slow_decode=slow_decode) # Beam search returns [n_batch,", "pylint: disable=cell-var-from-loop eval_metrics_sums) if jax.host_id() == 0: logging.info('Evaluation time: %.4f", "else 'fast' logging.info( 'Prediction time, %s (beam %d): %.4f s,", "inputs, outputs, cache, beam_size, eos_token, max_decode_len, config, slow_decode=True): \"\"\"Predict translation", "not compile' top_of_beam.append('index: {}, decoded: {}, tokens: {}'.format( index, decoded_program,", "and flatten batch dimensions.\"\"\" n_device, n_batch, *remaining_dims = x.shape return", "%s (beam %d): %.4f s, step %d, score %.4f', slow_or_fast,", "rng, init_rng = jax.random.split(rng) m = models.DecomposeAttentionTransformer(eval_config) initial_variables = jax.jit(m.init)(", "eval_metrics.append(metrics) eval_metrics = common_utils.get_metrics(eval_metrics) eval_metrics_sums = jax.tree_map(jnp.sum, eval_metrics) eval_denominator =", "jax.tree_map( lambda x: pad_examples(x, padded_size), pred_batch) inputs, outputs, programs =", "state from last checkpoint. optimizer = checkpoints.restore_checkpoint( os.path.join(FLAGS.save_dir, 'checkpoints', hparam_str),", "training.') flags.DEFINE_float('lr', 1e-3, 'Learning rate.') flags.DEFINE_float('weight_decay', 1e-1, 'Decay factor for", "pylint: disable=bare-except predictions.append('Did not compile') logging.info('ios: %s', ios[-1]) logging.info('target: %s',", "Interprets factors in the factors string which can consist of:", "Every k steps decay the learning rate by decay_factor. *", "assert start_step == FLAGS.finetune_start_step # Replicate optimizer. 
optimizer = jax_utils.replicate(optimizer)", "PRNG splitting inside the top pmap, rather # than handling", "factors='constant') p_train_step = jax.pmap( functools.partial( train_step, learning_rate_fn=learning_rate_fn, config=train_config), axis_name='batch') p_eval_step", "flat_logits else: def tokens_ids_to_logits(flat_ids, flat_cache): \"\"\"Token slice to logits from", "of steps between eval.') flags.DEFINE_integer('predict_freq', 50000, 'Number of steps between", "beam_size=beam_size, alpha=0.6, bos_token=config.base_config.bos_token, eos_token=eos_token, max_decode_len=max_decode_len, slow_decode=slow_decode) # Beam search returns", "init_rng, 'dropout': init_rng}, jnp.ones(io_shape, jnp.float32), jnp.ones(io_shape, jnp.float32), jnp.ones(program_shape, jnp.float32)) optimizer_def", "1000, 'Number of steps between training logs.') flags.DEFINE_integer('eval_freq', 2000, 'Number", "in increasing order of log-probability. return beam_seqs # Util functions", "# Util functions for prediction # ----------------------------------------------------------------------------- def pad_examples(x, desired_batch_size):", "import tokens as dsl_tokens sys.path.append('../../') gfile = tf.io.gfile FLAGS =", "weights) mean_loss = loss / weight_sum return mean_loss, logits step", "# Decrease batch of predict dataset to handle beam search.", "filepattern to dataset.') # Training dataset. logging.info('Loading dataset from %s',", "lr=FLAGS.lr) # Get hyperparmaters if FLAGS.xm_parameters: for key, value in", "FLAGS.finetune_start_step <= 0: learning_rate_fn = create_learning_rate_scheduler( base_learning_rate=FLAGS.lr) else: # Constant", "mask to use. Options are: baseline, ' 'bos_to_bos, bos_full_attention') flags.DEFINE_bool('use_relative_attention',", "def decode_str(s): \"\"\"Decode string tokens.\"\"\" return ''.join([id_char_table[c_id] for c_id in", "axis=-1), targets) normalizing_factor = jnp.prod(jnp.asarray(targets.shape)) if weights is not None:", "io_vocab_size = len(char_id_table) + 1 # For padding. program_vocab_size =", "= jnp.where(outputs > 0, 1, 0).astype(jnp.float32) flat_encoded_padding_mask = decode.flat_batch_beam_expand( encoded_padding_mask,", "summary = jax.tree_map( lambda x: x / denominator, # pylint:", "tf.enable_v2_behavior() tf.random.set_seed(FLAGS.seed) np.random.seed(FLAGS.seed) random.seed(FLAGS.seed) # BOS special attention only makes", "enumerate(dsl.CHARACTER)} char_id_table = {char: id for id, char in id_char_table.items()}", "to dataset.') # Training dataset. logging.info('Loading dataset from %s', FLAGS.dataset_filepattern)", "# beam search over possible sequences given input encoding. 
beam_seqs,", "and \" 'attention_mask_type={}'.format(FLAGS.use_relative_attention, FLAGS.attention_mask_type)) if not gfile.isdir(FLAGS.save_dir): gfile.makedirs(FLAGS.save_dir) hparam_str_dict =", "flat_cache}, flat_ids, flat_encoded, flat_encoded_padding_mask, mutable=['cache'], method=models.DecomposeAttentionTransformer.decode) new_flat_cache = new_vars['cache'] #", "doesn't work when use_relative_attention={} and \" 'attention_mask_type={}'.format(FLAGS.use_relative_attention, FLAGS.attention_mask_type)) if not", "{'params': params, 'cache': flat_cache}, flat_ids, flat_encoded, flat_encoded_padding_mask, mutable=['cache'], method=models.DecomposeAttentionTransformer.decode) new_flat_cache", "model checkpoints.') flags.DEFINE_string('attention_mask_type', 'bos_full_attention', 'The kind of attention mask to", "flat_encoded_padding_mask, mutable=['cache'], method=models.DecomposeAttentionTransformer.decode) new_flat_cache = new_vars['cache'] # Remove singleton sequence-length", "tokens.\"\"\" def decode_str(s): \"\"\"Decode string tokens.\"\"\" return ''.join([id_char_table[c_id] for c_id", "return acc.sum(), normalizing_factor def compute_metrics(logits, targets, weights): \"\"\"Compute summary metrics.\"\"\"", "Record beam search results as text summaries. message = []", "== FLAGS.finetune_start_step # Replicate optimizer. optimizer = jax_utils.replicate(optimizer) # TODO(jxihong):", "> 0, jnp.logical_and(programs != config.base_config.bos_token, programs != eos_token)), 1, 0).astype(jnp.float32)", "FLAGS.xm_parameters: for key, value in json.loads(FLAGS.xm_parameters).items(): if key not in", "if FLAGS.finetune_start_step > 0: logging.info('Checking that start_step (%s) == finetune_start_step", "checkpoints.save_checkpoint( os.path.join(FLAGS.save_dir, 'checkpoints', hparam_str), jax_utils.unreplicate(optimizer), step) # Periodic metric handling.", "tock summary_writer.scalar('train/steps per second', steps_per_sec, step) for key, val in", "compile') logging.info('ios: %s', ios[-1]) logging.info('target: %s', targets[-1]) beams_log = []", "a string with factors separated by '*' that defines the", "FLAGS.max_program_length) # Setup DSL # --------------------------------------------------------------------------- # Build token tables.", "- 1 # Save a Checkpoint if (step % FLAGS.checkpoint_freq", "* weights normalizing_factor = weights.sum() return loss.sum(), normalizing_factor def compute_weighted_accuracy(logits,", "metrics_all.append(metrics) is_last_step = step == FLAGS.num_train_steps - 1 # Save", "functions for prediction # ----------------------------------------------------------------------------- def pad_examples(x, desired_batch_size): \"\"\"Expand batch", "checkpoints.') flags.DEFINE_string('attention_mask_type', 'bos_full_attention', 'The kind of attention mask to use.", "OF ANY KIND, either express or implied. 
# See the", "% FLAGS.predict_freq == 0) or is_last_step: logging.info('Gathering beam search metrics.')", "tile_dims = [1] * len(x.shape) tile_dims[0] = batch_pad return np.concatenate([x,", "[] for inp, out in zip(inputs, outputs): inps.append(decode_str(inp)) outs.append(decode_str(out)) return", "See the License for the specific language governing permissions and", "search metrics.') for beam_size in [1, 5, 10, 20, 50]:", "= desired_batch_size - x.shape[0] tile_dims = [1] * len(x.shape) tile_dims[0]", "use special relative attention computation for ' 'BOS tokens.') _internal", "of max(step, warmup_steps) * decay_every: Every k steps decay the", "to in writing, software # distributed under the License is", "disable=cell-var-from-loop metrics_sums) summary['learning_rate'] = lr # Calculate (clipped) perplexity after", "batch item's data is expanded in-place # rather than tiled.", "for finetuning. learning_rate_fn = create_learning_rate_scheduler( base_learning_rate=FLAGS.lr, factors='constant') p_train_step = jax.pmap(", "num_heads=FLAGS.num_heads, num_layers=FLAGS.num_layers, qkv_dim=FLAGS.embedding_dim, mlp_dim=FLAGS.hidden_dim, max_len=max(FLAGS.max_characters, FLAGS.max_program_length), use_relative_attention=FLAGS.use_relative_attention, deterministic=not use_dropout, decode=False,", "= jax.random.fold_in(rng, jax.host_id()) rng, init_rng = jax.random.split(rng) m = models.DecomposeAttentionTransformer(eval_config)", "optimizer, metrics, dropout_rng = p_train_step( optimizer, inputs, outputs, programs, dropout_rng=dropout_rng)", "batch by padding instead of dropping it. cur_pred_batch_size = pred_batch[0].shape[0]", "# ----------------------------------------------------------------------------- def train_step(optimizer, inputs, outputs, programs, learning_rate_fn, config, dropout_rng):", "padded_shapes) dataset = input_pipeline.create_dataset_from_tf_record( FLAGS.dataset_filepattern, token_id_table, char_id_table) dataset = dataset.padded_batch(", "decode_program(beam).to_string() except: # pylint: disable=bare-except decoded_program = 'Did not compile'", "optimizer = optimizer_def.create(initial_variables['params']) del initial_variables # Don't keep a copy", "summary_writer.scalar( 'predict-{}/score-{}'.format(slow_or_fast, beam_size), all_pred_acc / all_pred_denominator, step) summary_writer.text('samples-{}'.format(beam_size), '\\n------\\n'.join(message), step)", "def compute_metrics(logits, targets, weights): \"\"\"Compute summary metrics.\"\"\" loss, weight_sum =", "or agreed to in writing, software # distributed under the", "of predict dataset to handle beam search. predict_ds = eval_ds.unbatch().padded_batch(", "base_models.TransformerConfig( vocab_size=io_vocab_size, output_vocab_size=program_vocab_size, shift=True, emb_dim=FLAGS.embedding_dim, num_heads=FLAGS.num_heads, num_layers=FLAGS.num_layers, qkv_dim=FLAGS.embedding_dim, mlp_dim=FLAGS.hidden_dim, max_len=max(FLAGS.max_characters,", "None # Program does not compile. # Load Dataset #", "decay the learning rate. steps_per_cycle: Steps per cycle when using", "config=config).apply( {'params': params, 'cache': flat_cache}, flat_ids, flat_encoded, flat_encoded_padding_mask, mutable=['cache'], method=models.DecomposeAttentionTransformer.decode)", "last step. start_step = int(optimizer.state.step) logging.info('Found model checkpointed at step", "= {char: id for id, char in id_char_table.items()} id_token_table, token_id_table", "f'top of beam:\\n\\n{top_of_beams[n]}\\n\\n') message.append(text) # Write to tensorboard. 
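# Example invocation of this training script; the script name and all paths
# below are placeholders, not values taken from the codebase:
#
#   python train.py \
#     --dataset_filepattern='/tmp/robust_fill/train-*' \
#     --save_dir=/tmp/decomposition_run \
#     --per_device_batch_size=16 \
#     --num_train_steps=1000000
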
def create_learning_rate_scheduler(
    base_learning_rate=0.5,
    factors='constant * linear_warmup * rsqrt_normalized_decay',
    warmup_steps=16000,
    decay_factor=0.5,
    steps_per_decay=50000,
    steps_per_cycle=100000):
  """Creates learning rate schedule.

  Interprets factors in the factors string which can consist of:
  * constant: interpreted as the constant value,
  * linear_warmup: interpreted as linear warmup until warmup_steps,
  * rsqrt_decay: divide by square root of max(step, warmup_steps)
  * decay_every: Every k steps decay the learning rate by decay_factor.
  * cosine_decay: Cyclic cosine decay, uses steps_per_cycle parameter.

  Args:
    base_learning_rate: float, the starting constant for the lr schedule.
    factors: a string with factors separated by '*' that defines the schedule.
    warmup_steps: how many steps to warm up for in the warmup schedule.
    decay_factor: The amount to decay the learning rate by.
    steps_per_decay: How often to decay the learning rate.
    steps_per_cycle: Steps per cycle when using cosine decay.

  Returns:
    A function learning_rate(step): float -> {'learning_rate': float}, the
    step-dependent lr.
  """
  factors = [n.strip() for n in factors.split('*')]

  def step_fn(step):
    """Step to learning rate function."""
    ret = 1.0
    for name in factors:
      if name == 'constant':
        ret *= base_learning_rate
      elif name == 'linear_warmup':
        ret *= jnp.minimum(1.0, step / warmup_steps)
      elif name == 'rsqrt_decay':
        ret /= jnp.sqrt(jnp.maximum(step, warmup_steps))
      elif name == 'rsqrt_normalized_decay':
        ret *= jnp.sqrt(warmup_steps)
        ret /= jnp.sqrt(jnp.maximum(step, warmup_steps))
      elif name == 'decay_every':
        ret *= (decay_factor**(step // steps_per_decay))
      elif name == 'cosine_decay':
        progress = jnp.maximum(0.0,
                               (step - warmup_steps) / float(steps_per_cycle))
        ret *= jnp.maximum(0.0,
                           0.5 * (1.0 + jnp.cos(jnp.pi * (progress % 1.0))))
      else:
        raise ValueError('Unknown factor %s.' % name)
    return jnp.asarray(ret, dtype=jnp.float32)

  return step_fn
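# A quick numeric check of create_learning_rate_scheduler with its defaults
# (factors 'constant * linear_warmup * rsqrt_normalized_decay',
# base_learning_rate=0.5, warmup_steps=16000): the rate ramps linearly to
# 0.5 at step 16000, then decays like sqrt(warmup_steps / step).
#
#   lr_fn = create_learning_rate_scheduler()
#   lr_fn(1600)   # 0.05 = 0.5 * (1600 / 16000) * 1.0
#   lr_fn(16000)  # 0.5  = 0.5 * 1.0  * 1.0 (warmup just finished)
#   lr_fn(64000)  # 0.25 = 0.5 * 1.0  * sqrt(16000 / 64000)
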
def compute_weighted_cross_entropy(logits, targets, weights=None):
  """Compute weighted cross entropy and entropy for log probs and targets.

  Args:
    logits: `[batch, length, num_classes]` float array.
    targets: categorical targets `[batch, length]` int array.
    weights: None or array of shape [batch, length, 1]

  Returns:
    Tuple of scalar loss and batch normalizing factor.
  """
  if logits.ndim != targets.ndim + 1:
    raise ValueError('Incorrect shapes. Got shape %s logits and %s targets' %
                     (str(logits.shape), str(targets.shape)))
  onehot_targets = common_utils.onehot(targets, logits.shape[-1])
  loss = -jnp.sum(onehot_targets * nn.log_softmax(logits), axis=-1)
  normalizing_factor = jnp.prod(jnp.asarray(targets.shape))
  if weights is not None:
    loss = loss * weights
    normalizing_factor = weights.sum()
  return loss.sum(), normalizing_factor


def compute_weighted_accuracy(logits, targets, weights=None):
  """Compute weighted accuracy for log probs and targets.

  Args:
    logits: `[batch, length, num_classes]` float array.
    targets: categorical targets `[batch, length]` int array.
    weights: None or array of shape [batch, length, 1]

  Returns:
    Tuple of scalar accuracy and batch normalizing factor.
  """
  if logits.ndim != targets.ndim + 1:
    raise ValueError('Incorrect shapes. Got shape %s logits and %s targets' %
                     (str(logits.shape), str(targets.shape)))
  acc = jnp.equal(jnp.argmax(logits, axis=-1), targets)
  normalizing_factor = jnp.prod(jnp.asarray(targets.shape))
  if weights is not None:
    acc = acc * weights
    normalizing_factor = weights.sum()
  return acc.sum(), normalizing_factor


def compute_metrics(logits, targets, weights):
  """Compute summary metrics."""
  loss, weight_sum = compute_weighted_cross_entropy(logits, targets, weights)
  acc, _ = compute_weighted_accuracy(logits, targets, weights)
  metrics = {
      'loss': loss,
      'accuracy': acc,
      'denominator': weight_sum,
  }
  metrics = jax.lax.psum(metrics, 'batch')
  return metrics


# Train / eval / decode step functions.
# -----------------------------------------------------------------------------


def train_step(optimizer,
               inputs,
               outputs,
               programs,
               learning_rate_fn,
               config,
               dropout_rng):
  """Train on batch of program tasks."""
  # We handle PRNG splitting inside the top pmap, rather
  # than handling it outside in the training loop - doing the
  # latter can add some stalls to the devices.
  dropout_rng, new_dropout_rng = jax.random.split(dropout_rng)

  weights = jnp.where(programs > 0, 1, 0).astype(jnp.float32)

  def loss_fn(params):
    """Loss function used for training."""
    logits = models.DecomposeAttentionTransformer(config).apply(
        {'params': params},
        inputs,
        outputs,
        programs,
        rngs={'dropout': dropout_rng})
    loss, weight_sum = compute_weighted_cross_entropy(logits, programs, weights)
    mean_loss = loss / weight_sum
    return mean_loss, logits

  step = optimizer.state.step
  lr = learning_rate_fn(step)
  grad_fn = jax.value_and_grad(loss_fn, has_aux=True)
  (_, logits), grad = grad_fn(optimizer.target)
  grad = jax.lax.pmean(grad, 'batch')
  new_optimizer = optimizer.apply_gradient(grad, learning_rate=lr)

  # Get metrics.
  metrics = compute_metrics(logits, programs, weights)
  metrics['learning_rate'] = lr
  return new_optimizer, metrics, new_dropout_rng


def eval_step(params, inputs, outputs, programs, eos_token, config):
  """Collect metrics for evaluation during training."""
  weights = jnp.where(
      jnp.logical_and(programs > 0,
                      jnp.logical_and(programs != config.base_config.bos_token,
                                      programs != eos_token)),
      1, 0).astype(jnp.float32)
  logits = models.DecomposeAttentionTransformer(config).apply(
      {'params': params}, inputs, outputs, programs)
  return compute_metrics(logits, programs, weights)


def initialize_cache(inputs, outputs, programs, max_decode_len, config):
  """Initialize a cache for a given input shape and max decode length."""
  target_shape = (programs.shape[0], max_decode_len)
  dtype = config.base_config.dtype
  initial_variables = models.DecomposeAttentionTransformer(config).init(
      jax.random.PRNGKey(0),
      jnp.ones(inputs.shape, dtype),
      jnp.ones(outputs.shape, dtype),
      jnp.ones(target_shape, dtype))
  return initial_variables['cache']
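# Sanity check for the weight mask in eval_step above: padding, BOS, and EOS
# tokens are all excluded from eval metrics. With made-up token ids
# bos_token=1 and eos_token=2, a padded program [1, 5, 7, 2, 0, 0] yields
# the mask [0., 1., 1., 0., 0., 0.], so only the two real program tokens
# count toward loss and accuracy.
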
def predict_step(params,
                 inputs,
                 outputs,
                 cache,
                 beam_size,
                 eos_token,
                 max_decode_len,
                 config,
                 slow_decode=True):
  """Predict translation with fast decoding beam search on a batch."""
  # Prepare transformer fast-decoder call for beam search: for beam search, we
  # need to set up our decoder model to handle a batch size equal to
  # batch_size * beam_size, where each batch item's data is expanded in-place
  # rather than tiled.
  flat_encoded = decode.flat_batch_beam_expand(
      models.DecomposeAttentionTransformer(config).apply(
          {'params': params},
          inputs,
          outputs,
          method=models.DecomposeAttentionTransformer.encode),
      beam_size)

  encoded_padding_mask = jnp.where(outputs > 0, 1, 0).astype(jnp.float32)
  flat_encoded_padding_mask = decode.flat_batch_beam_expand(
      encoded_padding_mask, beam_size)

  if slow_decode:
    def tokens_ids_to_logits(flat_ids):
      """Token slice to logits from decoder model."""
      # --> [batch * beam, 1, vocab]
      flat_logits = models.DecomposeAttentionTransformer(config=config).apply(
          {'params': params},
          flat_ids,
          flat_encoded,
          flat_encoded_padding_mask,
          method=models.DecomposeAttentionTransformer.decode)
      return flat_logits
  else:
    def tokens_ids_to_logits(flat_ids, flat_cache):
      """Token slice to logits from decoder model."""
      # --> [batch * beam, 1, vocab]
      flat_logits, new_vars = models.DecomposeAttentionTransformer(
          config=config).apply(
              {'params': params, 'cache': flat_cache},
              flat_ids,
              flat_encoded,
              flat_encoded_padding_mask,
              mutable=['cache'],
              method=models.DecomposeAttentionTransformer.decode)
      new_flat_cache = new_vars['cache']
      # Remove singleton sequence-length dimension:
      # [batch * beam, 1, vocab] --> [batch * beam, vocab]
      flat_logits = flat_logits.squeeze(axis=1)
      return flat_logits, new_flat_cache

  # Using the above-defined single-step decoder function, run a
  # beam search over possible sequences given input encoding.
  beam_seqs, _ = decode.beam_search(
      inputs,
      cache,
      tokens_ids_to_logits,
      beam_size=beam_size,
      alpha=0.6,
      bos_token=config.base_config.bos_token,
      eos_token=eos_token,
      max_decode_len=max_decode_len,
      slow_decode=slow_decode)

  # Beam search returns [n_batch, n_beam, n_length] with beam dimension
  # sorted in increasing order of log-probability.
  return beam_seqs


# Util functions for prediction
# -----------------------------------------------------------------------------


def pad_examples(x, desired_batch_size):
  """Expand batch to desired size by repeating last slice."""
  batch_pad = desired_batch_size - x.shape[0]
  tile_dims = [1] * len(x.shape)
  tile_dims[0] = batch_pad
  return np.concatenate([x, np.tile(x[-1], tile_dims)], axis=0)


def tohost(x):
  """Collect batches from all devices to host and flatten batch dimensions."""
  n_device, n_batch, *remaining_dims = x.shape
  return x.reshape((n_device * n_batch,) + tuple(remaining_dims))


def per_host_sum_pmap(in_tree):
  """Execute psum on in_tree's leaves over one device per host."""
  host2devices = collections.defaultdict(list)
  for d in jax.devices():
    host2devices[d.host_id].append(d)
  devices = [host2devices[k][0] for k in host2devices]
  host_psum = jax.pmap(lambda x: jax.lax.psum(x, 'i'), 'i', devices=devices)

  def pre_pmap(xs):
    return jax.tree_map(lambda x: jnp.broadcast_to(x, (1,) + x.shape), xs)

  def post_pmap(xs):
    return jax.tree_map(lambda x: x[0], xs)

  return post_pmap(host_psum(pre_pmap(in_tree)))


def eval_predicted(predicted, inputs, outputs, parse_beam_fn):
  """Evaluate predicted program beams."""
  best_p, best_score = None, -1

  # predicted shape [beam_size, length]
  for beam in predicted[::-1]:
    try:
      p = parse_beam_fn(beam)
      p_outs = [p(inp) for inp in inputs]
      score = np.sum([p_out == out for p_out, out in zip(p_outs, outputs)])
      if score > best_score:
        best_p, best_score = p, score
    except:  # pylint: disable=bare-except
      pass
    if best_score >= len(inputs):  # Found solution.
      break
  return best_p, best_score


def shorten(key):
  """Abbreviate a flag name to the initials of its underscore-split words."""
  splits = key.split('_')
  return ''.join(s[0] for s in splits)
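# pad_examples sanity check: padding a batch of five examples up to eight
# repeats the last slice three times,
#
#   pad_examples(np.arange(5), 8)  # -> array([0, 1, 2, 3, 4, 4, 4, 4])
#
# which is how the prediction loop below pads odd-sized final batches to a
# multiple of the device count instead of dropping them.
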
# See the License for the specific language governing", "tf from latent_programmer import decode from latent_programmer import models as", "not finetuning.') flags.DEFINE_bool('restore_checkpoints', True, 'Whether to restore from existing model", "flags.DEFINE_integer('num_eval_steps', 10, 'Number of evaluation steps.') flags.DEFINE_integer('log_freq', 1000, 'Number of", "above-defined single-step decoder function, run a # beam search over", "FLAGS.finetune_start_step # Replicate optimizer. optimizer = jax_utils.replicate(optimizer) # TODO(jxihong): Implement", "to learning rate function.\"\"\" ret = 1.0 for name in", "in factors: if name == 'constant': ret *= base_learning_rate elif", "(step % FLAGS.checkpoint_freq == 0 and step > 0) or", "steps_per_cycle=100000): \"\"\"Creates learning rate schedule. Interprets factors in the factors", "# [batch * beam, 1, vocab] --> [batch * beam,", "flags.DEFINE_float('weight_decay', 1e-1, 'Decay factor for AdamW-style weight decay.') flags.DEFINE_integer('embedding_dim', 256,", "programs, learning_rate_fn, config, dropout_rng): \"\"\"Train on batch of program tasks.\"\"\"", "train_ds = dataset.skip(FLAGS.num_eval_steps).repeat() train_iter = train_ds.as_numpy_iterator() # Build Model and", "= base_models.TransformerConfig( vocab_size=io_vocab_size, output_vocab_size=program_vocab_size, shift=True, emb_dim=FLAGS.embedding_dim, num_heads=FLAGS.num_heads, num_layers=FLAGS.num_layers, qkv_dim=FLAGS.embedding_dim, mlp_dim=FLAGS.hidden_dim,", "rather than tiled. flat_encoded = decode.flat_batch_beam_expand( models.DecomposeAttentionTransformer(config).apply( {'params': params}, inputs,", "tock = time.time() steps_per_sec = FLAGS.log_freq / (tock - tick)", "outs = decode_io(inputs[i], outputs[i]) p, p_score = eval_predicted( beams, inps,", "x.shape[0] tile_dims = [1] * len(x.shape) tile_dims[0] = batch_pad return", "work when use_relative_attention={} and \" 'attention_mask_type={}'.format(FLAGS.use_relative_attention, FLAGS.attention_mask_type)) if not gfile.isdir(FLAGS.save_dir):", "loss * weights normalizing_factor = weights.sum() return loss.sum(), normalizing_factor def", "max_decode_len=FLAGS.max_program_length, config=predict_config, slow_decode=FLAGS.slow_decode), axis_name='batch', static_broadcasted_argnums=(4,)) # Main Train Loop #", "weights): \"\"\"Compute summary metrics.\"\"\" loss, weight_sum = compute_weighted_cross_entropy(logits, targets, weights)", "handle a batch size equal to # batch_size * beam_size,", "# Training Metrics if (step and step % FLAGS.log_freq ==", "search returns [n_batch, n_beam, n_length] with beam dimension # sorted", "= (FLAGS.per_device_batch_size, FLAGS.max_program_length) # Setup DSL # --------------------------------------------------------------------------- # Build", "learning rate by decay_factor. * cosine_decay: Cyclic cosine decay, uses", "optimizer_def.create(initial_variables['params']) del initial_variables # Don't keep a copy of the", "jax.random.split(rng, jax.local_device_count()) del rng metrics_all = [] tick = time.time()", "# Split evaluation and training. 
eval_ds = dataset.take(FLAGS.num_eval_steps) # Decrease", "sense if we are using relative attention # and it's", "= jax.random.split(dropout_rng) weights = jnp.where(programs > 0, 1, 0).astype(jnp.float32) def", "flat_cache): \"\"\"Token slice to logits from decoder model.\"\"\" # -->", "from absl import app from absl import flags from absl", "for batches in predict_ds.as_numpy_iterator(): pred_batch = batches # Handle final", "inputs, outputs, programs, dropout_rng=dropout_rng) metrics_all.append(metrics) is_last_step = step == FLAGS.num_train_steps", "float(steps_per_cycle)) ret *= jnp.maximum(0.0, 0.5 * (1.0 + jnp.cos(jnp.pi *", "from latent_programmer.tasks.robust_fill import dsl from latent_programmer.tasks.robust_fill import tokens as dsl_tokens", "dataset.skip(FLAGS.num_eval_steps).repeat() train_iter = train_ds.as_numpy_iterator() # Build Model and Optimizer #", "input_pipeline from latent_programmer.tasks.robust_fill import dsl from latent_programmer.tasks.robust_fill import tokens as", "== 'baseline'): raise ValueError( \"bos_special_attention doesn't work when use_relative_attention={} and", "outside in the training loop - doing the # latter", "= create_learning_rate_scheduler( base_learning_rate=FLAGS.lr) else: # Constant LR for finetuning. learning_rate_fn", "except: # pylint: disable=bare-except predictions.append('Did not compile') logging.info('ios: %s', ios[-1])", "ret *= jnp.maximum(0.0, 0.5 * (1.0 + jnp.cos(jnp.pi * (progress", "eval_summary = jax.tree_map( lambda x: x / eval_denominator, # pylint:", "is not implemented yet.' if FLAGS.finetune_start_step <= 0: learning_rate_fn =", "Unless required by applicable law or agreed to in writing,", "flat_encoded_padding_mask, method=models.DecomposeAttentionTransformer.decode) return flat_logits else: def tokens_ids_to_logits(flat_ids, flat_cache): \"\"\"Token slice", "outputs, programs) eval_metrics.append(metrics) eval_metrics = common_utils.get_metrics(eval_metrics) eval_metrics_sums = jax.tree_map(jnp.sum, eval_metrics)", "# Remove singleton sequence-length dimension: # [batch * beam, 1,", "% (str(logits.shape), str(targets.shape))) acc = jnp.equal(jnp.argmax(logits, axis=-1), targets) normalizing_factor =", "k steps decay the learning rate by decay_factor. * cosine_decay:", "beam_size, time.time() - t_inference_start, step, all_pred_acc / all_pred_denominator) summary_writer.scalar( 'predict-{}/score-{}'.format(slow_or_fast,", "the specific language governing permissions and # limitations under the", "the top pmap, rather # than handling it outside in", "rate. steps_per_cycle: Steps per cycle when using cosine decay. Returns:", "pred_batch = batches # Handle final odd-sized batch by padding", "parameter. Args: base_learning_rate: float, the starting constant for the lr", "for prediction?') flags.DEFINE_string('dataset_filepattern', None, 'Filepattern for TFRecord dataset.') flags.DEFINE_integer('per_device_batch_size', 16,", "if FLAGS.finetune_start_step <= 0: learning_rate_fn = create_learning_rate_scheduler( base_learning_rate=FLAGS.lr) else: #", "= [host2devices[k][0] for k in host2devices] host_psum = jax.pmap(lambda x:", "outputs, cache, beam_size) predicted = tohost(predicted) inputs, outputs, programs =", "warmup_steps, * rsqrt_decay: divide by square root of max(step, warmup_steps)", "is_last_step = step == FLAGS.num_train_steps - 1 # Save a", "unreplicated optimizer + model state. 
checkpoints.save_checkpoint( os.path.join(FLAGS.save_dir, 'checkpoints', hparam_str), jax_utils.unreplicate(optimizer),", "time: %.4f s step %d, loss: %.4f.', time.time()-t_evaluation_start, step, eval_summary['loss'])", "learning rate by. steps_per_decay: How often to decay the learning", "models.DecomposeAttentionTransformer(config).apply( {'params': params}, inputs, outputs, programs, rngs={'dropout': dropout_rng}) loss, weight_sum", "to.') flags.DEFINE_integer('num_train_steps', 2000000, 'Number of training steps.') flags.DEFINE_integer('num_eval_steps', 10, 'Number", "config): \"\"\"Initialize a cache for a given input shape and", "decode.beam_search( inputs, cache, tokens_ids_to_logits, beam_size=beam_size, alpha=0.6, bos_token=config.base_config.bos_token, eos_token=eos_token, max_decode_len=max_decode_len, slow_decode=slow_decode)", "progress = jnp.maximum(0.0, (step - warmup_steps) / float(steps_per_cycle)) ret *=", "rsqrt_decay: divide by square root of max(step, warmup_steps) * decay_every:", "input shape and max decode length.\"\"\" target_shape = (programs.shape[0], max_decode_len)", "eval_ds.unbatch().padded_batch( int(np.ceil(batch_size / 10)), padded_shapes=padded_shapes) train_ds = dataset.skip(FLAGS.num_eval_steps).repeat() train_iter =", "char_id_table) dataset = dataset.padded_batch( batch_size, padded_shapes=padded_shapes, drop_remainder=True) # Split evaluation", "% FLAGS.log_freq == 0) or is_last_step: logging.info('Gathering training metrics.') metrics_all", "program = program[program != bos_token] try: return dsl.decode_program(program.tolist(), id_token_table) except:", "logging.info('Train in step: %d, loss: %.4f', step, summary['loss']) tock =", "FLAGS.max_characters) program_shape = (FLAGS.per_device_batch_size, FLAGS.max_program_length) # Setup DSL # ---------------------------------------------------------------------------", "= dsl_tokens.build_token_tables() io_vocab_size = len(char_id_table) + 1 # For padding.", "log probs and targets. Args: logits: `[batch, length, num_classes]` float", "is not None: loss = loss * weights normalizing_factor =", "warmup_steps) elif name == 'rsqrt_decay': ret /= jnp.sqrt(jnp.maximum(1.0, step -", "padded_shapes=padded_shapes, drop_remainder=True) # Split evaluation and training. eval_ds = dataset.take(FLAGS.num_eval_steps)", "= train_ds.as_numpy_iterator() # Build Model and Optimizer # --------------------------------------------------------------------------- use_dropout", "t_evaluation_start = time.time() eval_metrics = [] for batches in eval_ds.as_numpy_iterator():", "best_score: best_p, best_score = p, score except: # pylint: disable=bare-except", "after averaging log-perplexities: summary['perplexity'] = jnp.clip(jnp.exp(summary['loss']), a_max=1.0e4) if jax.host_id() ==", "or is_last_step: logging.info('Gathering training metrics.') metrics_all = common_utils.get_metrics(metrics_all) lr =", "for step in range(start_step, FLAGS.num_train_steps): inputs, outputs, programs = common_utils.shard(next(train_iter))", "%d.', start_step) if FLAGS.finetune_start_step > 0: logging.info('Checking that start_step (%s)", "results as text summaries. message = [] for n in", "'Fast decoding is not implemented yet.' if FLAGS.finetune_start_step <= 0:", "weights=None): \"\"\"Compute weighted accuracy for log probs and targets. Args:", "%d): %.4f s, step %d, score %.4f', slow_or_fast, beam_size, time.time()", "best_p, best_score = None, -1 # predicted shape [beam_size, length]", "stalls to the devices. 
dropout_rng, new_dropout_rng = jax.random.split(dropout_rng) weights =", "= False if not _internal: flags.DEFINE_string('xm_parameters', None, 'String specifying hyperparamter", "return mean_loss, logits step = optimizer.state.step lr = learning_rate_fn(step) grad_fn", "%s', targets[-1]) beams_log = [] for beam in beams: try:", "* cosine_decay: Cyclic cosine decay, uses steps_per_cycle parameter. Args: base_learning_rate:", "--------------------------------------------------------------------------- # Build token tables. id_char_table = {i+1: char for", "flax import jax_utils from flax import linen as nn from", "initial checkpoint should start at for ' 'finetuning, or -1", "'checkpoints', hparam_str), jax_utils.unreplicate(optimizer), step) # Periodic metric handling. # Training", "id, char in id_char_table.items()} id_token_table, token_id_table = dsl_tokens.build_token_tables() io_vocab_size =", "using cosine decay. Returns: A function learning_rate(step): float -> {'learning_rate':", "p_eval_step(optimizer.target, inputs, outputs, programs) eval_metrics.append(metrics) eval_metrics = common_utils.get_metrics(eval_metrics) eval_metrics_sums =", "token sequences (for eval). def decode_io(inputs, outputs): \"\"\"Decode io examples", "hparam_str), optimizer) # Grab last step. start_step = int(optimizer.state.step) logging.info('Found", "= optimizer.apply_gradient(grad, learning_rate=lr) # Get metrics. metrics = compute_metrics(logits, programs,", "metrics.') metrics_all = common_utils.get_metrics(metrics_all) lr = metrics_all.pop('learning_rate').mean() metrics_sums = jax.tree_map(jnp.sum,", "max_decode_len, config): \"\"\"Initialize a cache for a given input shape", "# predicted shape [beam_size, length] for beam in predicted[::-1]: try:", "char_id_table = {char: id for id, char in id_char_table.items()} id_token_table,", "* (progress % 1.0)))) else: raise ValueError('Unknown factor %s.' 
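# --- Illustrative usage sketch (added; not part of the original script) -----
# Shows how the `factors` string composes multiplicatively. The helper name
# `_example_lr_schedule` is hypothetical; nothing in the training code calls
# it, and it can be deleted freely.
def _example_lr_schedule():
  lr_fn = create_learning_rate_scheduler(
      base_learning_rate=1e-3,
      factors='constant * linear_warmup * rsqrt_normalized_decay',
      warmup_steps=16000)
  # The warmup factor scales linearly from 0 to 1 by step 16000; the
  # normalized rsqrt factor stays at 1.0 through warmup, then decays as
  # sqrt(warmup_steps / step). So: lr(0)=0, lr(8000)=5e-4, lr(16000)=1e-3,
  # lr(64000)=5e-4.
  for step in [0, 8000, 16000, 64000]:
    logging.info('step=%d, lr=%f', step, float(lr_fn(step)))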
%", "cache, beam_size, eos_token, max_decode_len, config, slow_decode=True): \"\"\"Predict translation with fast", "Load Dataset # --------------------------------------------------------------------------- logging.info('Initializing dataset.') if not FLAGS.dataset_filepattern: raise", "break return best_p, best_score def shorten(key): splits = key.split('_') return", "= common_utils.shard(next(train_iter)) optimizer, metrics, dropout_rng = p_train_step( optimizer, inputs, outputs,", "vocab] flat_logits, new_vars = models.DecomposeAttentionTransformer( config=config).apply( {'params': params, 'cache': flat_cache},", "with beam dimension # sorted in increasing order of log-probability.", "use_relative_attention=FLAGS.use_relative_attention, deterministic=not use_dropout, decode=False, bos_token=bos_token) train_config = models.DecomposeAttentionTransformerConfig( base_config=base_config, attention_mask_type=FLAGS.attention_mask_type,", "psum on in_tree's leaves over one device per host.\"\"\" host2devices", "np import tensorflow.compat.v2 as tf from latent_programmer import decode from", "return ''.join([id_char_table[c_id] for c_id in s if c_id > 0])", "tasks.\"\"\" # pytype: disable=wrong-arg-count # pytype: disable=attribute-error import collections import", "or -1 if not finetuning.') flags.DEFINE_bool('restore_checkpoints', True, 'Whether to restore", "predictions.append('Did not compile') logging.info('ios: %s', ios[-1]) logging.info('target: %s', targets[-1]) beams_log", "return metrics # Train / eval / decode step functions.", "char) in enumerate(dsl.CHARACTER)} char_id_table = {char: id for id, char", "of log-probability. return beam_seqs # Util functions for prediction #", "= p_eval_step(optimizer.target, inputs, outputs, programs) eval_metrics.append(metrics) eval_metrics = common_utils.get_metrics(eval_metrics) eval_metrics_sums", "if jax.host_id() == 0: slow_or_fast = 'slow' if FLAGS.slow_decode else", "factors.split('*')] def step_fn(step): \"\"\"Step to learning rate function.\"\"\" ret =", "when using cosine decay. Returns: A function learning_rate(step): float ->", "%.4f s, step %d, score %.4f', slow_or_fast, beam_size, time.time() -", "dsl_tokens.build_token_tables() io_vocab_size = len(char_id_table) + 1 # For padding. program_vocab_size", "# Main Train Loop # --------------------------------------------------------------------------- dropout_rng = jax.random.split(rng, jax.local_device_count())", "of: * constant: interpreted as the constant value, * linear_warmup:", "strings.') flags.DEFINE_string('save_dir', None, 'Directory to save results to.') flags.DEFINE_integer('num_train_steps', 2000000,", "jnp.ones(io_shape, jnp.float32), jnp.ones(io_shape, jnp.float32), jnp.ones(program_shape, jnp.float32)) optimizer_def = optim.Adam( FLAGS.lr,", "summary_writer.flush() # Beam search metrics. if (step and step %", "programs, weights) def initialize_cache(inputs, outputs, programs, max_decode_len, config): \"\"\"Initialize a", "jnp.prod(jnp.asarray(targets.shape)) if weights is not None: loss = loss *", "\"\"\" factors = [n.strip() for n in factors.split('*')] def step_fn(step):", "seed for training.') flags.DEFINE_float('lr', 1e-3, 'Learning rate.') flags.DEFINE_float('weight_decay', 1e-1, 'Decay", "steps_per_cycle parameter. Args: base_learning_rate: float, the starting constant for the", "some stalls to the devices. 
dropout_rng, new_dropout_rng = jax.random.split(dropout_rng) weights", "--------------------------------------------------------------------------- use_dropout = False base_config = base_models.TransformerConfig( vocab_size=io_vocab_size, output_vocab_size=program_vocab_size, shift=True,", "jnp.maximum(0.0, (step - warmup_steps) / float(steps_per_cycle)) ret *= jnp.maximum(0.0, 0.5", "weighted cross entropy and entropy for log probs and targets.", "= jax.tree_map( lambda x: x / denominator, # pylint: disable=cell-var-from-loop", "grad_fn(optimizer.target) grad = jax.lax.pmean(grad, 'batch') new_optimizer = optimizer.apply_gradient(grad, learning_rate=lr) #", "which can consist of: * constant: interpreted as the constant", "# --> [batch * beam, 1, vocab] flat_logits, new_vars =", "key, val in summary.items(): summary_writer.scalar('train/' + key, val, step) summary_writer.flush()", "jnp.where( jnp.logical_and(programs > 0, jnp.logical_and(programs != config.base_config.bos_token, programs != eos_token)),", "jax.value_and_grad(loss_fn, has_aux=True) (_, logits), grad = grad_fn(optimizer.target) grad = jax.lax.pmean(grad,", "[batch * beam, 1, vocab] flat_logits, new_vars = models.DecomposeAttentionTransformer( config=config).apply(", "Util functions for prediction # ----------------------------------------------------------------------------- def pad_examples(x, desired_batch_size): \"\"\"Expand", "p_pred_step(optimizer.target, inputs, outputs, cache, beam_size) predicted = tohost(predicted) inputs, outputs,", "absl import flags from absl import logging from flax import", "p_train_step = jax.pmap( functools.partial( train_step, learning_rate_fn=learning_rate_fn, config=train_config), axis_name='batch') p_eval_step =", "base_config = base_models.TransformerConfig( vocab_size=io_vocab_size, output_vocab_size=program_vocab_size, shift=True, emb_dim=FLAGS.embedding_dim, num_heads=FLAGS.num_heads, num_layers=FLAGS.num_layers, qkv_dim=FLAGS.embedding_dim,", "os import random import sys import time from absl import", "summary_writer = tensorboard.SummaryWriter( os.path.join(FLAGS.save_dir, 'tb', hparam_str)) batch_size = FLAGS.per_device_batch_size *", "* n_devices) # pylint: disable=cell-var-from-loop pred_batch = jax.tree_map( lambda x:", "training metrics.') metrics_all = common_utils.get_metrics(metrics_all) lr = metrics_all.pop('learning_rate').mean() metrics_sums =", "ret /= jnp.sqrt(jnp.maximum(step, warmup_steps)) elif name == 'decay_every': ret *=", "[p(inp) for inp in inputs] score = np.sum([p_out == out", "factor %s.' % name) return jnp.asarray(ret, dtype=jnp.float32) return step_fn def", "You may obtain a copy of the License at #", "np.random.choice(np.arange(len(predictions)), 8): text = (f'ios: {ios[n]}\\n\\ntarget: {targets[n]}\\n\\n' f'predicted: {predictions[n]}\\n\\n' f'top", "[batch * beam, 1, vocab] --> [batch * beam, vocab]", "devices to host and flatten batch dimensions.\"\"\" n_device, n_batch, *remaining_dims", "desired_batch_size): \"\"\"Expand batch to desired size by repeating last slice.\"\"\"", "jax.lax.pmean(grad, 'batch') new_optimizer = optimizer.apply_gradient(grad, learning_rate=lr) # Get metrics. metrics", "each batch item's data is expanded in-place # rather than", "assert FLAGS.slow_decode, 'Fast decoding is not implemented yet.' 
if FLAGS.finetune_start_step", "eval_metrics = [] for batches in eval_ds.as_numpy_iterator(): inputs, outputs, programs", "params}, flat_ids, flat_encoded, flat_encoded_padding_mask, method=models.DecomposeAttentionTransformer.decode) return flat_logits else: def tokens_ids_to_logits(flat_ids,", "dimension.') flags.DEFINE_integer('num_heads', 4, 'Number of layers.') flags.DEFINE_integer('num_layers', 3, 'Number of", "FLAGS.dataset_filepattern, token_id_table, char_id_table) dataset = dataset.padded_batch( batch_size, padded_shapes=padded_shapes, drop_remainder=True) #", "loss, 'accuracy': acc, 'denominator': weight_sum, } metrics = jax.lax.psum(metrics, 'batch')", "np.sum([p_out == out for p_out, out in zip(p_outs, outputs)]) if", "= jnp.clip(jnp.exp(summary['loss']), a_max=1.0e4) if jax.host_id() == 0: logging.info('Train in step:", "target_shape = (programs.shape[0], max_decode_len) dtype = config.base_config.dtype initial_variables = models.DecomposeAttentionTransformer(config).init(", "predicted = p_pred_step(optimizer.target, inputs, outputs, cache, beam_size) predicted = tohost(predicted)", "message.append(text) # Write to tensorboard. if jax.host_id() == 0: slow_or_fast", "jnp.ones(target_shape, dtype)) return initial_variables['cache'] def predict_step(params, inputs, outputs, cache, beam_size,", "jax.pmap( functools.partial( initialize_cache, max_decode_len=FLAGS.max_program_length, config=predict_config), axis_name='batch') p_pred_step = jax.pmap( functools.partial(", "step == FLAGS.num_train_steps - 1 # Save a Checkpoint if", "= [], [], [], [] for batches in predict_ds.as_numpy_iterator(): pred_batch", "0).astype(jnp.float32) def loss_fn(params): \"\"\"Loss function used for training.\"\"\" logits =", "size by repeating last slice.\"\"\" batch_pad = desired_batch_size - x.shape[0]", "dimension # sorted in increasing order of log-probability. return beam_seqs", "from %s', FLAGS.dataset_filepattern) padded_shapes = (io_shape[1:], io_shape[1:], program_shape[1:]) logging.info('padded_shapes: %s',", "cur_pred_batch_size = pred_batch[0].shape[0] if cur_pred_batch_size % n_devices: padded_size = int(", "= x.shape return x.reshape((n_device * n_batch,) + tuple(remaining_dims)) def per_host_sum_pmap(in_tree):", "x: x / eval_denominator, # pylint: disable=cell-var-from-loop eval_metrics_sums) if jax.host_id()", "accumulation for next evaluation cycle. 
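# --- Illustrative usage sketch (added; not part of the original script) -----
# Shows the padding-mask weighting used throughout: token id 0 is padding,
# and the weights zero out padded positions before the sums are normalized.
# `_example_weighted_metrics` is a hypothetical helper and is never called.
def _example_weighted_metrics():
  logits = jnp.zeros((1, 4, 7))         # [batch, length, vocab]; uniform.
  targets = jnp.array([[5, 3, 0, 0]])   # Trailing zeros are padding.
  weights = jnp.where(targets > 0, 1, 0).astype(jnp.float32)
  loss, denom = compute_weighted_cross_entropy(logits, targets, weights)
  # denom == 2.0: only the two real tokens count toward the mean loss.
  return loss / denom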
# Train / eval / decode step functions.
# -----------------------------------------------------------------------------


def train_step(optimizer,
               inputs,
               outputs,
               programs,
               learning_rate_fn,
               config,
               dropout_rng):
  """Train on batch of program tasks."""
  # We handle PRNG splitting inside the top pmap, rather than handling it
  # outside in the training loop - doing the latter can add some stalls to
  # the devices.
  dropout_rng, new_dropout_rng = jax.random.split(dropout_rng)

  weights = jnp.where(programs > 0, 1, 0).astype(jnp.float32)

  def loss_fn(params):
    """Loss function used for training."""
    logits = models.DecomposeAttentionTransformer(config).apply(
        {'params': params},
        inputs,
        outputs,
        programs,
        rngs={'dropout': dropout_rng})
    loss, weight_sum = compute_weighted_cross_entropy(logits, programs,
                                                      weights)
    mean_loss = loss / weight_sum
    return mean_loss, logits

  step = optimizer.state.step
  lr = learning_rate_fn(step)
  grad_fn = jax.value_and_grad(loss_fn, has_aux=True)
  (_, logits), grad = grad_fn(optimizer.target)
  grad = jax.lax.pmean(grad, 'batch')
  new_optimizer = optimizer.apply_gradient(grad, learning_rate=lr)

  # Get metrics.
  metrics = compute_metrics(logits, programs, weights)
  metrics['learning_rate'] = lr
  return new_optimizer, metrics, new_dropout_rng


def eval_step(params, inputs, outputs, programs, eos_token, config):
  """Collect metrics for evaluation during training."""
  weights = jnp.where(
      jnp.logical_and(
          programs > 0,
          jnp.logical_and(programs != config.base_config.bos_token,
                          programs != eos_token)),
      1, 0).astype(jnp.float32)
  logits = models.DecomposeAttentionTransformer(config).apply(
      {'params': params}, inputs, outputs, programs)
  return compute_metrics(logits, programs, weights)


def initialize_cache(inputs, outputs, programs, max_decode_len, config):
  """Initialize a cache for a given input shape and max decode length."""
  target_shape = (programs.shape[0], max_decode_len)
  dtype = config.base_config.dtype
  initial_variables = models.DecomposeAttentionTransformer(config).init(
      jax.random.PRNGKey(0),
      jnp.ones(inputs.shape, dtype),
      jnp.ones(outputs.shape, dtype),
      jnp.ones(target_shape, dtype))
  return initial_variables['cache']


def predict_step(params,
                 inputs,
                 outputs,
                 cache,
                 beam_size,
                 eos_token,
                 max_decode_len,
                 config,
                 slow_decode=True):
  """Predict translation with fast decoding beam search on a batch."""
  # Prepare transformer fast-decoder call for beam search: for beam search, we
  # need to set up our decoder model to handle a batch size equal to
  # batch_size * beam_size, where each batch item's data is expanded in-place
  # rather than tiled.
  flat_encoded = decode.flat_batch_beam_expand(
      models.DecomposeAttentionTransformer(config).apply(
          {'params': params},
          inputs,
          outputs,
          method=models.DecomposeAttentionTransformer.encode),
      beam_size)

  encoded_padding_mask = jnp.where(outputs > 0, 1, 0).astype(jnp.float32)
  flat_encoded_padding_mask = decode.flat_batch_beam_expand(
      encoded_padding_mask, beam_size)

  if slow_decode:
    def tokens_ids_to_logits(flat_ids):
      """Token slice to logits from decoder model."""
      # --> [batch * beam, 1, vocab]
      flat_logits = models.DecomposeAttentionTransformer(config=config).apply(
          {'params': params},
          flat_ids,
          flat_encoded,
          flat_encoded_padding_mask,
          method=models.DecomposeAttentionTransformer.decode)
      return flat_logits
  else:
    def tokens_ids_to_logits(flat_ids, flat_cache):
      """Token slice to logits from decoder model."""
      # --> [batch * beam, 1, vocab]
      flat_logits, new_vars = models.DecomposeAttentionTransformer(
          config=config).apply(
              {'params': params, 'cache': flat_cache},
              flat_ids,
              flat_encoded,
              flat_encoded_padding_mask,
              mutable=['cache'],
              method=models.DecomposeAttentionTransformer.decode)
      new_flat_cache = new_vars['cache']
      # Remove singleton sequence-length dimension:
      # [batch * beam, 1, vocab] --> [batch * beam, vocab]
      flat_logits = flat_logits.squeeze(axis=1)
      return flat_logits, new_flat_cache

  # Using the above-defined single-step decoder function, run a beam search
  # over possible sequences given input encoding.
  beam_seqs, _ = decode.beam_search(
      inputs,
      cache,
      tokens_ids_to_logits,
      beam_size=beam_size,
      alpha=0.6,
      bos_token=config.base_config.bos_token,
      eos_token=eos_token,
      max_decode_len=max_decode_len,
      slow_decode=slow_decode)

  # Beam search returns [n_batch, n_beam, n_length] with beam dimension
  # sorted in increasing order of log-probability.
  return beam_seqs
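# --- Illustrative note (added; not part of the original script) -------------
# The "expanded in-place rather than tiled" layout above: judging from its use
# here, decode.flat_batch_beam_expand behaves like repeating each batch item
# beam_size times along axis 0, e.g. for beam_size=3,
#
#   x = jnp.array([[1], [2]])   # [batch=2, 1]
#   jnp.repeat(x, 3, axis=0)    # [[1], [1], [1], [2], [2], [2]]
#
# i.e. [a, a, a, b, b, b] rather than the tiled [a, b, a, b, a, b], so beam k
# of batch item i lives at flat index i * beam_size + k.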
# Util functions for prediction
# -----------------------------------------------------------------------------


def pad_examples(x, desired_batch_size):
  """Expand batch to desired size by repeating last slice."""
  batch_pad = desired_batch_size - x.shape[0]
  tile_dims = [1] * len(x.shape)
  tile_dims[0] = batch_pad
  return np.concatenate([x, np.tile(x[-1], tile_dims)], axis=0)


def tohost(x):
  """Collect batches from all devices to host and flatten batch dimensions."""
  n_device, n_batch, *remaining_dims = x.shape
  return x.reshape((n_device * n_batch,) + tuple(remaining_dims))


def per_host_sum_pmap(in_tree):
  """Execute psum on in_tree's leaves over one device per host."""
  host2devices = collections.defaultdict(list)
  for d in jax.devices():
    host2devices[d.host_id].append(d)
  devices = [host2devices[k][0] for k in host2devices]
  host_psum = jax.pmap(lambda x: jax.lax.psum(x, 'i'), 'i', devices=devices)

  def pre_pmap(xs):
    return jax.tree_map(lambda x: jnp.broadcast_to(x, (1,) + x.shape), xs)

  def post_pmap(xs):
    return jax.tree_map(lambda x: x[0], xs)

  return post_pmap(host_psum(pre_pmap(in_tree)))


def eval_predicted(predicted, inputs, outputs, parse_beam_fn):
  """Evaluate predicted program beams."""
  best_p, best_score = None, -1

  # predicted shape [beam_size, length]
  for beam in predicted[::-1]:
    try:
      p = parse_beam_fn(beam)
      p_outs = [p(inp) for inp in inputs]
      score = np.sum([p_out == out for p_out, out in zip(p_outs, outputs)])
      if score > best_score:
        best_p, best_score = p, score
    except:  # pylint: disable=bare-except
      pass
    if best_score >= len(inputs):  # Found solution.
      break
  return best_p, best_score


def shorten(key):
  splits = key.split('_')
  return ''.join(s[0] for s in splits)
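# --- Illustrative usage sketch (added; not part of the original script) -----
# pad_examples pads the final odd-sized predict batch up to a multiple of the
# device count by repeating the last example, e.g. with n_devices=2:
#
#   x = np.array([[1], [2], [3]])   # batch of 3
#   pad_examples(x, 4)              # [[1], [2], [3], [3]]
#
# The repeated rows are then sharded and scored like real rows, so they also
# count toward pred_denominator in the beam-search loop below.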
#", "inputs, outputs, programs, eos_token, config): \"\"\"Collect metrics for evaluation during", "import logging from flax import jax_utils from flax import linen", "for beam search: for beam search, we # need to", "how many steps to warm up for in the warmup", "outputs, parse_beam_fn): \"\"\"Evaluate predicted program beams.\"\"\" best_p, best_score = None,", "pad_examples(x, padded_size), pred_batch) inputs, outputs, programs = common_utils.shard(pred_batch) cache =", "the License is distributed on an \"AS IS\" BASIS, #", "array of shape [batch, length, 1] Returns: Tuple of scalar", "programs, dropout_rng=dropout_rng) metrics_all.append(metrics) is_last_step = step == FLAGS.num_train_steps - 1", "is_last_step: logging.info('Gathering evaluation metrics.') t_evaluation_start = time.time() eval_metrics = []", "def eval_predicted(predicted, inputs, outputs, parse_beam_fn): \"\"\"Evaluate predicted program beams.\"\"\" best_p,", "val, step) summary_writer.flush() # Reset metric accumulation for next evaluation", "# batch_size * beam_size, where each batch item's data is", "message = [] for n in np.random.choice(np.arange(len(predictions)), 8): text =", "between prediction (beam search).') flags.DEFINE_integer('checkpoint_freq', 50000, 'Number of steps between", "beams_log.append('Did not compile') logging.info('predicted beam: %s', '\\n'.join(beams_log)) top_of_beam = []", "governing permissions and # limitations under the License. # python3", "beams: try: beams_log.append(decode_program(beam).to_string()) except: # pylint: disable=bare-except beams_log.append('Did not compile')", "latent_programmer.tasks.robust_fill import tokens as dsl_tokens sys.path.append('../../') gfile = tf.io.gfile FLAGS", "[] for batches in predict_ds.as_numpy_iterator(): pred_batch = batches # Handle", "/ all_pred_denominator) summary_writer.scalar( 'predict-{}/score-{}'.format(slow_or_fast, beam_size), all_pred_acc / all_pred_denominator, step) summary_writer.text('samples-{}'.format(beam_size),", "random import sys import time from absl import app from", "50]: t_inference_start = time.time() pred_acc = 0 pred_denominator = 0", "= jax.pmap( functools.partial( predict_step, eos_token=eos_token, max_decode_len=FLAGS.max_program_length, config=predict_config, slow_decode=FLAGS.slow_decode), axis_name='batch', static_broadcasted_argnums=(4,))", "0: slow_or_fast = 'slow' if FLAGS.slow_decode else 'fast' logging.info( 'Prediction", "training tasks.\"\"\" # pytype: disable=wrong-arg-count # pytype: disable=attribute-error import collections", "devices = [host2devices[k][0] for k in host2devices] host_psum = jax.pmap(lambda", "= loss / weight_sum return mean_loss, logits step = optimizer.state.step", "eval_config = models.DecomposeAttentionTransformerConfig( base_config=base_config.replace(deterministic=not use_dropout), attention_mask_type=FLAGS.attention_mask_type, bos_special_attention=FLAGS.bos_special_attention) predict_config = models.DecomposeAttentionTransformerConfig(", "+ model state from last checkpoint. 
optimizer = checkpoints.restore_checkpoint( os.path.join(FLAGS.save_dir,", "top_of_beams = [], [], [], [] for batches in predict_ds.as_numpy_iterator():", "weights) def initialize_cache(inputs, outputs, programs, max_decode_len, config): \"\"\"Initialize a cache", "warmup_steps: how many steps to warm up for in the", "law or agreed to in writing, software # distributed under", "%.4f', slow_or_fast, beam_size, time.time() - t_inference_start, step, all_pred_acc / all_pred_denominator)", "weight_sum return mean_loss, logits step = optimizer.state.step lr = learning_rate_fn(step)", "time.time() steps_per_sec = FLAGS.log_freq / (tock - tick) tick =", "dimension.') flags.DEFINE_integer('hidden_dim', 512, 'Hidden dimension.') flags.DEFINE_integer('num_heads', 4, 'Number of layers.')", "steps to warm up for in the warmup schedule. decay_factor:", "step %d, loss: %.4f.', time.time()-t_evaluation_start, step, eval_summary['loss']) for key, val", "def per_host_sum_pmap(in_tree): \"\"\"Execute psum on in_tree's leaves over one device", "'String specifying hyperparamter search.') def create_learning_rate_scheduler( base_learning_rate=0.5, factors='constant * linear_warmup", "\"\"\"Compute weighted cross entropy and entropy for log probs and", "normalizing_factor def compute_weighted_accuracy(logits, targets, weights=None): \"\"\"Compute weighted accuracy for log", "compute_weighted_accuracy(logits, targets, weights=None): \"\"\"Compute weighted accuracy for log probs and", "dtype), jnp.ones(outputs.shape, dtype), jnp.ones(target_shape, dtype)) return initial_variables['cache'] def predict_step(params, inputs,", "for c_id in s if c_id > 0]) inps, outs", "uses steps_per_cycle parameter. Args: base_learning_rate: float, the starting constant for", "unreplicated optimizer + model state from last checkpoint. optimizer =", "-> {'learning_rate': float}, the step-dependent lr. \"\"\" factors = [n.strip()", "may obtain a copy of the License at # #", "logging.info('ios: %s', ios[-1]) logging.info('target: %s', targets[-1]) beams_log = [] for", "decode_io(inputs[i], outputs[i]) p, p_score = eval_predicted( beams, inps, outs, parse_beam_fn=decode_program)", "# --> [batch * beam, 1, vocab] flat_logits = models.DecomposeAttentionTransformer(config=config).apply(", "initialize_cache(inputs, outputs, programs, max_decode_len, config): \"\"\"Initialize a cache for a", "id_token_table) except: # pylint: disable=bare-except return None # Program does", "instead of dropping it. cur_pred_batch_size = pred_batch[0].shape[0] if cur_pred_batch_size %", "schedule. warmup_steps: how many steps to warm up for in", "training.\"\"\" logits = models.DecomposeAttentionTransformer(config).apply( {'params': params}, inputs, outputs, programs, rngs={'dropout':", "0]) inps, outs = [], [] for inp, out in", "to tensorboard. 
if jax.host_id() == 0: slow_or_fast = 'slow' if", "jax.host_id()) rng, init_rng = jax.random.split(rng) m = models.DecomposeAttentionTransformer(eval_config) initial_variables =", "common_utils.get_metrics(metrics_all) lr = metrics_all.pop('learning_rate').mean() metrics_sums = jax.tree_map(jnp.sum, metrics_all) denominator =", "dataset.') if not FLAGS.dataset_filepattern: raise ValueError('Must specify filepattern to dataset.')", "may not use this file except in compliance with the", "import linen as nn from flax import optim from flax.metrics", "prediction (beam search).') flags.DEFINE_integer('checkpoint_freq', 50000, 'Number of steps between checkpoint", "out in zip(p_outs, outputs)]) if score > best_score: best_p, best_score", "import checkpoints from flax.training import common_utils import jax import jax.numpy", "i, beams in enumerate(predicted): inps, outs = decode_io(inputs[i], outputs[i]) p,", "time.time() pred_acc = 0 pred_denominator = 0 ios, targets, predictions,", "slow_or_fast, beam_size, time.time() - t_inference_start, step, all_pred_acc / all_pred_denominator) summary_writer.scalar(", "beam search: for beam search, we # need to set", "def shorten(key): splits = key.split('_') return ''.join(s[0] for s in", "= (io_shape[1:], io_shape[1:], program_shape[1:]) logging.info('padded_shapes: %s', padded_shapes) dataset = input_pipeline.create_dataset_from_tf_record(", "initial_variables = jax.jit(m.init)( {'params': init_rng, 'dropout': init_rng}, jnp.ones(io_shape, jnp.float32), jnp.ones(io_shape,", "this file except in compliance with the License. # You", "jax.random.PRNGKey(0), jnp.ones(inputs.shape, dtype), jnp.ones(outputs.shape, dtype), jnp.ones(target_shape, dtype)) return initial_variables['cache'] def", "= models.DecomposeAttentionTransformer(config).init( jax.random.PRNGKey(0), jnp.ones(inputs.shape, dtype), jnp.ones(outputs.shape, dtype), jnp.ones(target_shape, dtype)) return", "step-dependent lr. \"\"\" factors = [n.strip() for n in factors.split('*')]", "learning rate function.\"\"\" ret = 1.0 for name in factors:", "= program[:np.argmax(program == eos_token) + 1].astype(np.int32) program = program[program !=", "search results as text summaries. message = [] for n", "key not in hparam_str_dict: hparam_str_dict[key] = value hparam_str = ','.join(['%s=%s'", "del rng metrics_all = [] tick = time.time() for step", "name == 'cosine_decay': progress = jnp.maximum(0.0, (step - warmup_steps) /", "fast decoding beam search on a batch.\"\"\" # Prepare transformer", "# # Licensed under the Apache License, Version 2.0 (the", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "batch size equal to # batch_size * beam_size, where each", "# Record beam search results as text summaries. 
message =", "% n_devices: padded_size = int( np.ceil(cur_pred_batch_size / n_devices) * n_devices)", "'Learning rate.') flags.DEFINE_float('weight_decay', 1e-1, 'Decay factor for AdamW-style weight decay.')", "'constant': ret *= base_learning_rate elif name == 'linear_warmup': ret *=", "pred_denominator += programs.shape[0] for i, beams in enumerate(predicted): inps, outs", "train_step(optimizer, inputs, outputs, programs, learning_rate_fn, config, dropout_rng): \"\"\"Train on batch", "in the training loop - doing the # latter can", "= compute_metrics(logits, programs, weights) metrics['learning_rate'] = lr return new_optimizer, metrics,", "None, -1 # predicted shape [beam_size, length] for beam in", "axis_name='batch') p_pred_step = jax.pmap( functools.partial( predict_step, eos_token=eos_token, max_decode_len=FLAGS.max_program_length, config=predict_config, slow_decode=FLAGS.slow_decode),", "# sorted in increasing order of log-probability. return beam_seqs #", "if FLAGS.xm_parameters: for key, value in json.loads(FLAGS.xm_parameters).items(): if key not", "cache, beam_size) predicted = tohost(predicted) inputs, outputs, programs = map(tohost,", "def train_step(optimizer, inputs, outputs, programs, learning_rate_fn, config, dropout_rng): \"\"\"Train on", "config=eval_config), axis_name='batch') p_init_cache = jax.pmap( functools.partial( initialize_cache, max_decode_len=FLAGS.max_program_length, config=predict_config), axis_name='batch')", "handling. # Training Metrics if (step and step % FLAGS.log_freq", "step functions. # ----------------------------------------------------------------------------- def train_step(optimizer, inputs, outputs, programs, learning_rate_fn,", "programs, eos_token, config): \"\"\"Collect metrics for evaluation during training.\"\"\" weights", "= compute_weighted_cross_entropy(logits, programs, weights) mean_loss = loss / weight_sum return", "sys.path.append('../../') gfile = tf.io.gfile FLAGS = flags.FLAGS flags.DEFINE_integer('seed', 0, 'Fixed", "text summaries. message = [] for n in np.random.choice(np.arange(len(predictions)), 8):", "ret /= jnp.sqrt(jnp.maximum(1.0, step - warmup_steps)) elif name == 'rsqrt_normalized_decay':", "loss = loss * weights normalizing_factor = weights.sum() return loss.sum(),", "try: p = parse_beam_fn(beam) p_outs = [p(inp) for inp in", "beam dimension # sorted in increasing order of log-probability. return", "if logits.ndim != targets.ndim + 1: raise ValueError('Incorrect shapes. Got", "logging.info('predicted beam: %s', '\\n'.join(beams_log)) top_of_beam = [] for index, beam", "inside the top pmap, rather # than handling it outside", "jax.local_device_count() if jax.host_id() == 0: summary_writer = tensorboard.SummaryWriter( os.path.join(FLAGS.save_dir, 'tb',", "drop_remainder=True) # Split evaluation and training. eval_ds = dataset.take(FLAGS.num_eval_steps) #", "all_pred_acc / all_pred_denominator, step) summary_writer.text('samples-{}'.format(beam_size), '\\n------\\n'.join(message), step) summary_writer.flush() if __name__", "x: x[0], xs) return post_pmap(host_psum(pre_pmap(in_tree))) def eval_predicted(predicted, inputs, outputs, parse_beam_fn):", "or implied. 
def create_learning_rate_scheduler(
    base_learning_rate=0.5,
    factors='constant * linear_warmup * rsqrt_normalized_decay',
    warmup_steps=16000,
    decay_factor=0.5,
    steps_per_decay=50000,
    steps_per_cycle=100000):
  """Creates learning rate schedule.

  Interprets factors in the factors string which can consist of:
  * constant: interpreted as the constant value,
  * linear_warmup: interpreted as linear warmup until warmup_steps,
  * rsqrt_decay: divide by square root of max(step, warmup_steps)
  * decay_every: Every k steps decay the learning rate by decay_factor.
  * cosine_decay: Cyclic cosine decay, uses steps_per_cycle parameter.

  Args:
    base_learning_rate: float, the starting constant for the lr schedule.
    factors: a string with factors separated by '*' that defines the schedule.
    warmup_steps: how many steps to warm up for in the warmup schedule.
    decay_factor: The amount to decay the learning rate by.
    steps_per_decay: How often to decay the learning rate.
    steps_per_cycle: Steps per cycle when using cosine decay.

  Returns:
    A function learning_rate(step): float -> {'learning_rate': float}, the
    step-dependent lr.
  """
  factors = [n.strip() for n in factors.split('*')]

  def step_fn(step):
    """Step to learning rate function."""
    ret = 1.0
    for name in factors:
      if name == 'constant':
        ret *= base_learning_rate
      elif name == 'linear_warmup':
        ret *= jnp.minimum(1.0, step / warmup_steps)
      elif name == 'rsqrt_decay':
        ret /= jnp.sqrt(jnp.maximum(1.0, step - warmup_steps))
      elif name == 'rsqrt_normalized_decay':
        ret *= jnp.sqrt(warmup_steps)
        ret /= jnp.sqrt(jnp.maximum(step, warmup_steps))
      elif name == 'decay_every':
        ret *= (decay_factor**(step // steps_per_decay))
      elif name == 'cosine_decay':
        progress = jnp.maximum(0.0,
                               (step - warmup_steps) / float(steps_per_cycle))
        ret *= jnp.maximum(0.0,
                           0.5 * (1.0 + jnp.cos(jnp.pi * (progress % 1.0))))
      else:
        raise ValueError('Unknown factor %s.' % name)
    return jnp.asarray(ret, dtype=jnp.float32)

  return step_fn
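# Illustrative usage sketch (not part of the original script): evaluating the
# default schedule at a few arbitrary step values.
def _example_learning_rate_schedule():
  """Sketch: linear_warmup ramps the rate up; rsqrt decay then shrinks it."""
  lr_fn = create_learning_rate_scheduler(base_learning_rate=1e-3)
  # At step 0 the linear_warmup factor is 0; it reaches 1.0 at warmup_steps,
  # after which rsqrt_normalized_decay dominates.
  return {step: float(lr_fn(step)) for step in (0, 8000, 16000, 64000)}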
def compute_weighted_cross_entropy(logits, targets, weights=None):
  """Compute weighted cross entropy and entropy for log probs and targets.

  Args:
    logits: `[batch, length, num_classes]` float array.
    targets: categorical targets `[batch, length]` int array.
    weights: None or array of shape [batch, length, 1]

  Returns:
    Tuple of scalar loss and batch normalizing factor.
  """
  if logits.ndim != targets.ndim + 1:
    raise ValueError('Incorrect shapes. Got shape %s logits and %s targets' %
                     (str(logits.shape), str(targets.shape)))

  onehot_targets = common_utils.onehot(targets, logits.shape[-1])
  loss = -jnp.sum(onehot_targets * nn.log_softmax(logits), axis=-1)
  normalizing_factor = jnp.prod(jnp.asarray(targets.shape))
  if weights is not None:
    loss = loss * weights
    normalizing_factor = weights.sum()
  return loss.sum(), normalizing_factor
def compute_weighted_accuracy(logits, targets, weights=None):
  """Compute weighted accuracy for log probs and targets.

  Args:
    logits: `[batch, length, num_classes]` float array.
    targets: categorical targets `[batch, length]` int array.
    weights: None or array of shape [batch, length, 1]

  Returns:
    Tuple of scalar accuracy and batch normalizing factor.
  """
  if logits.ndim != targets.ndim + 1:
    raise ValueError('Incorrect shapes. Got shape %s logits and %s targets' %
                     (str(logits.shape), str(targets.shape)))
  acc = jnp.equal(jnp.argmax(logits, axis=-1), targets)
  normalizing_factor = jnp.prod(jnp.asarray(targets.shape))
  if weights is not None:
    acc = acc * weights
    normalizing_factor = weights.sum()
  return acc.sum(), normalizing_factor


def compute_metrics(logits, targets, weights):
  """Compute summary metrics."""
  loss, weight_sum = compute_weighted_cross_entropy(logits, targets, weights)
  acc, _ = compute_weighted_accuracy(logits, targets, weights)
  metrics = {
      'loss': loss,
      'accuracy': acc,
      'denominator': weight_sum,
  }
  metrics = jax.lax.psum(metrics, 'batch')
  return metrics
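# Illustrative sketch (not from the original file): the weighting convention
# on a toy batch. Padding positions get weight 0 and drop out of both the
# loss sum and the normalizer.
def _example_weighted_metrics():
  """Sketch: one sequence of length 3 whose last position is padding."""
  logits = jnp.zeros((1, 3, 5))     # uniform predictions over 5 classes
  targets = jnp.array([[1, 2, 0]])  # 0 is the padding id
  weights = jnp.where(targets > 0, 1, 0).astype(jnp.float32)
  loss, norm = compute_weighted_cross_entropy(logits, targets, weights)
  # loss / norm == log(5): cross entropy of a uniform 5-way distribution.
  return float(loss / norm)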
if", "inps, outs def decode_program(program): \"\"\"Decode program tokens.\"\"\" program = program[:np.argmax(program", "tick) tick = tock summary_writer.scalar('train/steps per second', steps_per_sec, step) for", "jnp.asarray(ret, dtype=jnp.float32) return step_fn def compute_weighted_cross_entropy(logits, targets, weights=None): \"\"\"Compute weighted", "% (shorten(k), str(hparam_str_dict[k])) for k in sorted(hparam_str_dict.keys())]) # Number of", "slow decoding for prediction?') flags.DEFINE_string('dataset_filepattern', None, 'Filepattern for TFRecord dataset.')", "latent_programmer.decomposition_transformer_attention import decomposition_models as models from latent_programmer.decomposition_transformer_attention import input_pipeline from", "1].astype(np.int32) program = program[program != bos_token] try: return dsl.decode_program(program.tolist(), id_token_table)", "eval_metrics_sums) if jax.host_id() == 0: logging.info('Evaluation time: %.4f s step", "FLAGS.max_program_length), use_relative_attention=FLAGS.use_relative_attention, deterministic=not use_dropout, decode=False, bos_token=bos_token) train_config = models.DecomposeAttentionTransformerConfig( base_config=base_config,", "relative positonal embeddings.') flags.DEFINE_bool('bos_special_attention', False, 'Whether to use special relative", "factors='constant * linear_warmup * rsqrt_normalized_decay', warmup_steps=16000, decay_factor=0.5, steps_per_decay=50000, steps_per_cycle=100000): \"\"\"Creates", "'fast' logging.info( 'Prediction time, %s (beam %d): %.4f s, step", "n_length] with beam dimension # sorted in increasing order of", "are: baseline, ' 'bos_to_bos, bos_full_attention') flags.DEFINE_bool('use_relative_attention', True, 'Whether to use", "* n_batch,) + tuple(remaining_dims)) def per_host_sum_pmap(in_tree): \"\"\"Execute psum on in_tree's", "from latent_programmer.decomposition_transformer_attention import decomposition_models as models from latent_programmer.decomposition_transformer_attention import input_pipeline", "flags.DEFINE_string('xm_parameters', None, 'String specifying hyperparamter search.') def create_learning_rate_scheduler( base_learning_rate=0.5, factors='constant", "The amount to decay the learning rate by. steps_per_decay: How", "flags.DEFINE_integer('embedding_dim', 256, 'Embedding dimension.') flags.DEFINE_integer('hidden_dim', 512, 'Hidden dimension.') flags.DEFINE_integer('num_heads', 4,", "Steps per cycle when using cosine decay. Returns: A function", "from latent_programmer.decomposition_transformer_attention import input_pipeline from latent_programmer.tasks.robust_fill import dsl from latent_programmer.tasks.robust_fill", "rsqrt_normalized_decay', warmup_steps=16000, decay_factor=0.5, steps_per_decay=50000, steps_per_cycle=100000): \"\"\"Creates learning rate schedule. 
def eval_step(params, inputs, outputs, programs, eos_token, config):
  """Collect metrics for evaluation during training."""
  weights = jnp.where(
      jnp.logical_and(
          programs > 0,
          jnp.logical_and(programs != config.base_config.bos_token,
                          programs != eos_token)),
      1, 0).astype(jnp.float32)
  logits = models.DecomposeAttentionTransformer(config).apply(
      {'params': params}, inputs, outputs, programs)
  return compute_metrics(logits, programs, weights)


def initialize_cache(inputs, outputs, programs, max_decode_len, config):
  """Initialize a cache for a given input shape and max decode length."""
  target_shape = (programs.shape[0], max_decode_len)
  dtype = config.base_config.dtype
  initial_variables = models.DecomposeAttentionTransformer(config).init(
      jax.random.PRNGKey(0),
      jnp.ones(inputs.shape, dtype),
      jnp.ones(outputs.shape, dtype),
      jnp.ones(target_shape, dtype))
  return initial_variables['cache']
def predict_step(params,
                 inputs,
                 outputs,
                 cache,
                 beam_size,
                 eos_token,
                 max_decode_len,
                 config,
                 slow_decode=True):
  """Predict translation with fast decoding beam search on a batch."""
  # Prepare transformer fast-decoder call for beam search: for beam search, we
  # need to set up our decoder model to handle a batch size equal to
  # batch_size * beam_size, where each batch item's data is expanded in-place
  # rather than tiled.
  flat_encoded = decode.flat_batch_beam_expand(
      models.DecomposeAttentionTransformer(config).apply(
          {'params': params},
          inputs,
          outputs,
          method=models.DecomposeAttentionTransformer.encode),
      beam_size)

  encoded_padding_mask = jnp.where(outputs > 0, 1, 0).astype(jnp.float32)
  flat_encoded_padding_mask = decode.flat_batch_beam_expand(
      encoded_padding_mask, beam_size)

  if slow_decode:
    def tokens_ids_to_logits(flat_ids):
      """Token slice to logits from decoder model."""
      # --> [batch * beam, 1, vocab]
      flat_logits = models.DecomposeAttentionTransformer(config=config).apply(
          {'params': params},
          flat_ids,
          flat_encoded,
          flat_encoded_padding_mask,
          method=models.DecomposeAttentionTransformer.decode)
      return flat_logits
  else:
    def tokens_ids_to_logits(flat_ids, flat_cache):
      """Token slice to logits from decoder model."""
      # --> [batch * beam, 1, vocab]
      flat_logits, new_vars = models.DecomposeAttentionTransformer(
          config=config).apply(
              {'params': params, 'cache': flat_cache},
              flat_ids,
              flat_encoded,
              flat_encoded_padding_mask,
              mutable=['cache'],
              method=models.DecomposeAttentionTransformer.decode)
      new_flat_cache = new_vars['cache']
      # Remove singleton sequence-length dimension:
      # [batch * beam, 1, vocab] --> [batch * beam, vocab]
      flat_logits = flat_logits.squeeze(axis=1)
      return flat_logits, new_flat_cache

  # Using the above-defined single-step decoder function, run a
  # beam search over possible sequences given input encoding.
  beam_seqs, _ = decode.beam_search(
      inputs,
      cache,
      tokens_ids_to_logits,
      beam_size=beam_size,
      alpha=0.6,
      bos_token=config.base_config.bos_token,
      eos_token=eos_token,
      max_decode_len=max_decode_len,
      slow_decode=slow_decode)

  # Beam search returns [n_batch, n_beam, n_length] with beam dimension
  # sorted in increasing order of log-probability.
  return beam_seqs
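# Illustrative note (an assumption, not from the original file): the flat
# batch expansion above turns an encoding of shape [batch, ...] into
# [batch * beam_size, ...], repeating each batch item so every beam
# hypothesis sees its own copy. A shape-only sketch of those semantics:
def _example_flat_batch_beam_expand(encoded, beam_size):
  """Sketch of the expansion semantics assumed for flat_batch_beam_expand."""
  return jnp.repeat(encoded, beam_size, axis=0)  # [batch * beam, ...]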
# Util functions for prediction
# -----------------------------------------------------------------------------


def pad_examples(x, desired_batch_size):
  """Expand batch to desired size by repeating last slice."""
  batch_pad = desired_batch_size - x.shape[0]
  tile_dims = [1] * len(x.shape)
  tile_dims[0] = batch_pad
  return np.concatenate([x, np.tile(x[-1], tile_dims)], axis=0)


def tohost(x):
  """Collect batches from all devices to host and flatten batch dimensions."""
  n_device, n_batch, *remaining_dims = x.shape
  return x.reshape((n_device * n_batch,) + tuple(remaining_dims))
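# Illustrative sketch (not from the original file): the shapes these helpers
# produce. pad_examples repeats the last row to reach a device-divisible
# batch; tohost undoes the leading device axis added by common_utils.shard.
def _example_batch_reshaping():
  """Sketch: pad a batch of 3 up to 4, then flatten a 2-device axis."""
  x = np.arange(6).reshape(3, 2)
  padded = pad_examples(x, 4)        # shape (4, 2); row 3 repeats row 2
  sharded = padded.reshape(2, 2, 2)  # as if shard() split over 2 devices
  return tohost(sharded).shape       # back to (4, 2)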
\"\"\" factors = [n.strip() for n in", "= checkpoints.restore_checkpoint( os.path.join(FLAGS.save_dir, 'checkpoints', hparam_str), optimizer) # Grab last step.", "as the constant value, * linear_warmup: interpreted as linear warmup", "common_utils.shard(next(train_iter)) optimizer, metrics, dropout_rng = p_train_step( optimizer, inputs, outputs, programs,", "rngs={'dropout': dropout_rng}) loss, weight_sum = compute_weighted_cross_entropy(logits, programs, weights) mean_loss =", "'Maximum number of characters in input/output strings.') flags.DEFINE_string('save_dir', None, 'Directory", "of steps between training logs.') flags.DEFINE_integer('eval_freq', 2000, 'Number of steps", "checkpointed at step %d.', start_step) if FLAGS.finetune_start_step > 0: logging.info('Checking", "+ x.shape), xs) def post_pmap(xs): return jax.tree_map(lambda x: x[0], xs)", "handle beam search. predict_ds = eval_ds.unbatch().padded_batch( int(np.ceil(batch_size / 10)), padded_shapes=padded_shapes)", "from existing model checkpoints.') flags.DEFINE_string('attention_mask_type', 'bos_full_attention', 'The kind of attention", "rng metrics_all = [] tick = time.time() for step in", "metrics_all) denominator = metrics_sums.pop('denominator') summary = jax.tree_map( lambda x: x", "the learning rate by. steps_per_decay: How often to decay the", "parse_beam_fn(beam) p_outs = [p(inp) for inp in inputs] score =", "FLAGS.log_freq / (tock - tick) tick = tock summary_writer.scalar('train/steps per", "eval_metrics) eval_denominator = eval_metrics_sums.pop('denominator') eval_summary = jax.tree_map( lambda x: x", "initial_variables # Don't keep a copy of the initial model.", "beam_size in [1, 5, 10, 20, 50]: t_inference_start = time.time()", "jax.local_device_count()) del rng metrics_all = [] tick = time.time() for", "all_pred_acc / all_pred_denominator) summary_writer.scalar( 'predict-{}/score-{}'.format(slow_or_fast, beam_size), all_pred_acc / all_pred_denominator, step)", "Model and Optimizer # --------------------------------------------------------------------------- use_dropout = False base_config =", "# pytype: disable=attribute-error import collections import functools import json import", "tokens in program.') flags.DEFINE_integer('max_characters', 120, 'Maximum number of characters in", "the learning rate. steps_per_cycle: Steps per cycle when using cosine", "not the baseline. if FLAGS.bos_special_attention and (not FLAGS.use_relative_attention or FLAGS.attention_mask_type", "eval_step(params, inputs, outputs, programs, eos_token, config): \"\"\"Collect metrics for evaluation", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "(str(logits.shape), str(targets.shape))) acc = jnp.equal(jnp.argmax(logits, axis=-1), targets) normalizing_factor = jnp.prod(jnp.asarray(targets.shape))", "for training.\"\"\" logits = models.DecomposeAttentionTransformer(config).apply( {'params': params}, inputs, outputs, programs,", "step > 0) or is_last_step: if jax.host_id() == 0: #", "from flax.training import common_utils import jax import jax.numpy as jnp", "== FLAGS.num_train_steps - 1 # Save a Checkpoint if (step", "in step: %d, loss: %.4f', step, summary['loss']) tock = time.time()", "Restore unreplicated optimizer + model state from last checkpoint. optimizer", "import functools import json import os import random import sys", "if FLAGS.slow_decode else 'fast' logging.info( 'Prediction time, %s (beam %d):", "# Save unreplicated optimizer + model state. 
def main(_):
  tf.enable_v2_behavior()

  tf.random.set_seed(FLAGS.seed)
  np.random.seed(FLAGS.seed)
  random.seed(FLAGS.seed)

  # BOS special attention only makes sense if we are using relative attention
  # and it's not the baseline.
  if FLAGS.bos_special_attention and (not FLAGS.use_relative_attention or
                                      FLAGS.attention_mask_type == 'baseline'):
    raise ValueError(
        "bos_special_attention doesn't work when use_relative_attention={} and "
        'attention_mask_type={}'.format(FLAGS.use_relative_attention,
                                        FLAGS.attention_mask_type))

  hparam_str_dict = dict(seed=FLAGS.seed, lr=FLAGS.lr)
  if FLAGS.xm_parameters:
    for key, value in json.loads(FLAGS.xm_parameters).items():
      if key not in hparam_str_dict:
        hparam_str_dict[key] = value
  hparam_str = ','.join(['%s=%s' % (shorten(k), str(hparam_str_dict[k]))
                         for k in sorted(hparam_str_dict.keys())])

  # Number of local devices for this host.
  n_devices = jax.local_device_count()

  if jax.host_id() == 0:
    summary_writer = tensorboard.SummaryWriter(
        os.path.join(FLAGS.save_dir, 'tb', hparam_str))

  batch_size = FLAGS.per_device_batch_size * n_devices
  io_shape = (FLAGS.per_device_batch_size,
              FLAGS.num_strings_per_task,
              FLAGS.max_characters)
  program_shape = (FLAGS.per_device_batch_size, FLAGS.max_program_length)
  # Setup DSL
  # ---------------------------------------------------------------------------

  # Build token tables.
  id_char_table = {i + 1: char for (i, char) in enumerate(dsl.CHARACTER)}
  char_id_table = {char: id for id, char in id_char_table.items()}
  id_token_table, token_id_table = dsl_tokens.build_token_tables()
  io_vocab_size = len(char_id_table) + 1  # For padding.
  program_vocab_size = len(token_id_table) + 1
  bos_token = token_id_table[dsl.BOS]
  eos_token = token_id_table[dsl.EOS]

  # Decode token id sequences back to strings and programs (for eval logging).
  def decode_io(inputs, outputs):
    """Decode io examples tokens."""

    def decode_str(s):
      """Decode string tokens."""
      return ''.join([id_char_table[c_id] for c_id in s if c_id > 0])

    inps, outs = [], []
    for inp, out in zip(inputs, outputs):
      inps.append(decode_str(inp))
      outs.append(decode_str(out))
    return inps, outs

  def decode_program(program):
    """Decode program tokens."""
    program = program[:np.argmax(program == eos_token) + 1].astype(np.int32)
    program = program[program != bos_token]
    try:
      return dsl.decode_program(program.tolist(), id_token_table)
    except:  # pylint: disable=bare-except
      return None  # Program does not compile.
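  # Illustrative note (an assumption, not from the original file): with an
  # id_char_table like {1: 'a', 2: 'b'}, decode_str([1, 2, 0, 0]) yields 'ab',
  # since padding ids (0) are skipped. decode_program additionally strips BOS,
  # truncates at the first EOS, and returns None when the tokens do not parse.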
\"\"\" if logits.ndim != targets.ndim", "'BOS tokens.') _internal = False if not _internal: flags.DEFINE_string('xm_parameters', None,", "for id, char in id_char_table.items()} id_token_table, token_id_table = dsl_tokens.build_token_tables() io_vocab_size", "# Load Dataset # --------------------------------------------------------------------------- logging.info('Initializing dataset.') if not FLAGS.dataset_filepattern:", "under the License is distributed on an \"AS IS\" BASIS,", "import tensorflow.compat.v2 as tf from latent_programmer import decode from latent_programmer", "beam, 1, vocab] --> [batch * beam, vocab] flat_logits =", "\"\"\"Token slice to logits from decoder model.\"\"\" # --> [batch", "decay_every: Every k steps decay the learning rate by decay_factor.", "weights normalizing_factor = weights.sum() return acc.sum(), normalizing_factor def compute_metrics(logits, targets,", "models.DecomposeAttentionTransformerConfig( base_config=base_config.replace( shift=False, deterministic=not use_dropout, decode=not FLAGS.slow_decode), attention_mask_type=FLAGS.attention_mask_type, bos_special_attention=FLAGS.bos_special_attention) rng", "of training steps.') flags.DEFINE_integer('num_eval_steps', 10, 'Number of evaluation steps.') flags.DEFINE_integer('log_freq',", "shapes. Got shape %s logits and %s targets' % (str(logits.shape),", "# pylint: disable=cell-var-from-loop metrics_sums) summary['learning_rate'] = lr # Calculate (clipped)", "rate.') flags.DEFINE_float('weight_decay', 1e-1, 'Decay factor for AdamW-style weight decay.') flags.DEFINE_integer('embedding_dim',", "decode=not FLAGS.slow_decode), attention_mask_type=FLAGS.attention_mask_type, bos_special_attention=FLAGS.bos_special_attention) rng = jax.random.PRNGKey(FLAGS.seed) rng = jax.random.fold_in(rng,", "over one device per host.\"\"\" host2devices = collections.defaultdict(list) for d", "# Evaluation Metrics if (step and step % FLAGS.eval_freq ==", "predict_step(params, inputs, outputs, cache, beam_size, eos_token, max_decode_len, config, slow_decode=True): \"\"\"Predict", "= loss * weights normalizing_factor = weights.sum() return loss.sum(), normalizing_factor", "flatten batch dimensions.\"\"\" n_device, n_batch, *remaining_dims = x.shape return x.reshape((n_device", "char in id_char_table.items()} id_token_table, token_id_table = dsl_tokens.build_token_tables() io_vocab_size = len(char_id_table)", "model state from last checkpoint. optimizer = checkpoints.restore_checkpoint( os.path.join(FLAGS.save_dir, 'checkpoints',", "+ tuple(remaining_dims)) def per_host_sum_pmap(in_tree): \"\"\"Execute psum on in_tree's leaves over", "== 0 and step > 0) or is_last_step: if jax.host_id()", "by. steps_per_decay: How often to decay the learning rate. 
  # Build Model and Optimizer
  # ---------------------------------------------------------------------------
  use_dropout = False
  base_config = base_models.TransformerConfig(
      vocab_size=io_vocab_size,
      output_vocab_size=program_vocab_size,
      shift=True,
      emb_dim=FLAGS.embedding_dim,
      num_heads=FLAGS.num_heads,
      num_layers=FLAGS.num_layers,
      qkv_dim=FLAGS.embedding_dim,
      mlp_dim=FLAGS.hidden_dim,
      max_len=max(FLAGS.max_characters, FLAGS.max_program_length),
      use_relative_attention=FLAGS.use_relative_attention,
      deterministic=not use_dropout,
      decode=False,
      bos_token=bos_token)
  train_config = models.DecomposeAttentionTransformerConfig(
      base_config=base_config,
      attention_mask_type=FLAGS.attention_mask_type,
      bos_special_attention=FLAGS.bos_special_attention)
  eval_config = models.DecomposeAttentionTransformerConfig(
      base_config=base_config.replace(deterministic=not use_dropout),
      attention_mask_type=FLAGS.attention_mask_type,
      bos_special_attention=FLAGS.bos_special_attention)
  predict_config = models.DecomposeAttentionTransformerConfig(
      base_config=base_config.replace(
          shift=False,
          deterministic=not use_dropout,
          decode=not FLAGS.slow_decode),
      attention_mask_type=FLAGS.attention_mask_type,
      bos_special_attention=FLAGS.bos_special_attention)

  rng = jax.random.PRNGKey(FLAGS.seed)
  rng = jax.random.fold_in(rng, jax.host_id())
  rng, init_rng = jax.random.split(rng)

  m = models.DecomposeAttentionTransformer(eval_config)
  initial_variables = jax.jit(m.init)(
      {'params': init_rng, 'dropout': init_rng},
      jnp.ones(io_shape, jnp.float32),
      jnp.ones(io_shape, jnp.float32),
      jnp.ones(program_shape, jnp.float32))

  optimizer_def = optim.Adam(
      FLAGS.lr,
      beta1=0.9,
      beta2=0.98,
      eps=1e-9,
      weight_decay=FLAGS.weight_decay)
  optimizer = optimizer_def.create(initial_variables['params'])

  del initial_variables  # Don't keep a copy of the initial model.

  start_step = 0
  if FLAGS.restore_checkpoints:
    # Restore unreplicated optimizer + model state from last checkpoint.
    optimizer = checkpoints.restore_checkpoint(
        os.path.join(FLAGS.save_dir, 'checkpoints', hparam_str), optimizer)
    # Grab last step.
    start_step = int(optimizer.state.step)
    logging.info('Found model checkpointed at step %d.', start_step)
    if FLAGS.finetune_start_step > 0:
      logging.info('Checking that start_step (%s) == finetune_start_step (%s)',
                   start_step, FLAGS.finetune_start_step)
      assert start_step == FLAGS.finetune_start_step

  # Replicate optimizer.
  optimizer = jax_utils.replicate(optimizer)

  # TODO(jxihong): Implement fast decoding.
  assert FLAGS.slow_decode, 'Fast decoding is not implemented yet.'
  if FLAGS.finetune_start_step <= 0:
    learning_rate_fn = create_learning_rate_scheduler(
        base_learning_rate=FLAGS.lr)
  else:
    # Constant LR for finetuning.
    learning_rate_fn = create_learning_rate_scheduler(
        base_learning_rate=FLAGS.lr, factors='constant')
  p_train_step = jax.pmap(
      functools.partial(
          train_step,
          learning_rate_fn=learning_rate_fn,
          config=train_config),
      axis_name='batch')
  p_eval_step = jax.pmap(
      functools.partial(eval_step,
                        eos_token=eos_token,
                        config=eval_config),
      axis_name='batch')
  p_init_cache = jax.pmap(
      functools.partial(
          initialize_cache,
          max_decode_len=FLAGS.max_program_length,
          config=predict_config),
      axis_name='batch')
  p_pred_step = jax.pmap(
      functools.partial(
          predict_step,
          eos_token=eos_token,
          max_decode_len=FLAGS.max_program_length,
          config=predict_config,
          slow_decode=FLAGS.slow_decode),
      axis_name='batch',
      static_broadcasted_argnums=(4,))
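  # Illustrative note (an assumption, not from the original file): in
  # p_pred_step, static_broadcasted_argnums=(4,) marks the fifth positional
  # argument (beam_size) as a static Python value that is broadcast to all
  # devices rather than sharded, so each distinct beam size compiles once
  # and the compiled function is reused thereafter.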
  # Main Train Loop
  # ---------------------------------------------------------------------------

  dropout_rng = jax.random.split(rng, jax.local_device_count())
  del rng

  metrics_all = []
  tick = time.time()
  for step in range(start_step, FLAGS.num_train_steps):
    inputs, outputs, programs = common_utils.shard(next(train_iter))

    optimizer, metrics, dropout_rng = p_train_step(
        optimizer, inputs, outputs, programs, dropout_rng=dropout_rng)
    metrics_all.append(metrics)
    is_last_step = step == FLAGS.num_train_steps - 1

    # Save a Checkpoint
    if (step % FLAGS.checkpoint_freq == 0 and step > 0) or is_last_step:
      if jax.host_id() == 0:
        # Save unreplicated optimizer + model state.
        checkpoints.save_checkpoint(
            os.path.join(FLAGS.save_dir, 'checkpoints', hparam_str),
            jax_utils.unreplicate(optimizer),
            step)

    # Periodic metric handling.

    # Training Metrics
    if (step and step % FLAGS.log_freq == 0) or is_last_step:
      logging.info('Gathering training metrics.')
      metrics_all = common_utils.get_metrics(metrics_all)
      lr = metrics_all.pop('learning_rate').mean()
      metrics_sums = jax.tree_map(jnp.sum, metrics_all)
      denominator = metrics_sums.pop('denominator')
      summary = jax.tree_map(
          lambda x: x / denominator,  # pylint: disable=cell-var-from-loop
          metrics_sums)
      summary['learning_rate'] = lr
      # Calculate (clipped) perplexity after averaging log-perplexities:
      summary['perplexity'] = jnp.clip(jnp.exp(summary['loss']), a_max=1.0e4)

      if jax.host_id() == 0:
        logging.info('Train in step: %d, loss: %.4f', step, summary['loss'])
        tock = time.time()
        steps_per_sec = FLAGS.log_freq / (tock - tick)
        tick = tock
        summary_writer.scalar('train/steps per second', steps_per_sec, step)
        for key, val in summary.items():
          summary_writer.scalar('train/' + key, val, step)
        summary_writer.flush()
      # Reset metric accumulation for next evaluation cycle.
      metrics_all = []
    # Evaluation Metrics
    if (step and step % FLAGS.eval_freq == 0) or is_last_step:
      logging.info('Gathering evaluation metrics.')
      t_evaluation_start = time.time()
      eval_metrics = []
      for batches in eval_ds.as_numpy_iterator():
        inputs, outputs, programs = common_utils.shard(batches)

        metrics = p_eval_step(optimizer.target, inputs, outputs, programs)
        eval_metrics.append(metrics)

      eval_metrics = common_utils.get_metrics(eval_metrics)
      eval_metrics_sums = jax.tree_map(jnp.sum, eval_metrics)
      eval_denominator = eval_metrics_sums.pop('denominator')
      eval_summary = jax.tree_map(
          lambda x: x / eval_denominator,  # pylint: disable=cell-var-from-loop
          eval_metrics_sums)

      if jax.host_id() == 0:
        logging.info('Evaluation time: %.4f s step %d, loss: %.4f.',
                     time.time() - t_evaluation_start, step,
                     eval_summary['loss'])
        for key, val in eval_summary.items():
          summary_writer.scalar('eval/' + key, val, step)
        summary_writer.flush()
steps_per_decay:", "if p_score >= len(inps): pred_acc += 1 ios.append(' ; '.join(map(str,", "models.DecomposeAttentionTransformer(config).apply( {'params': params}, inputs, outputs, programs) return compute_metrics(logits, programs, weights)", "batch_pad = desired_batch_size - x.shape[0] tile_dims = [1] * len(x.shape)", "flax.training import common_utils import jax import jax.numpy as jnp import", "= tf.io.gfile FLAGS = flags.FLAGS flags.DEFINE_integer('seed', 0, 'Fixed random seed", "logging.info('Evaluation time: %.4f s step %d, loss: %.4f.', time.time()-t_evaluation_start, step,", "= models.DecomposeAttentionTransformerConfig( base_config=base_config.replace(deterministic=not use_dropout), attention_mask_type=FLAGS.attention_mask_type, bos_special_attention=FLAGS.bos_special_attention) predict_config = models.DecomposeAttentionTransformerConfig( base_config=base_config.replace(", "# Replicate optimizer. optimizer = jax_utils.replicate(optimizer) # TODO(jxihong): Implement fast", "steps.') flags.DEFINE_integer('num_eval_steps', 10, 'Number of evaluation steps.') flags.DEFINE_integer('log_freq', 1000, 'Number", "Evaluation Metrics if (step and step % FLAGS.eval_freq == 0)", "= models.DecomposeAttentionTransformer(config).apply( {'params': params}, inputs, outputs, programs) return compute_metrics(logits, programs,", "p_init_cache = jax.pmap( functools.partial( initialize_cache, max_decode_len=FLAGS.max_program_length, config=predict_config), axis_name='batch') p_pred_step =", "flat_logits, new_vars = models.DecomposeAttentionTransformer( config=config).apply( {'params': params, 'cache': flat_cache}, flat_ids,", "loss / weight_sum return mean_loss, logits step = optimizer.state.step lr", "outputs, programs, rngs={'dropout': dropout_rng}) loss, weight_sum = compute_weighted_cross_entropy(logits, programs, weights)", "> 0]) inps, outs = [], [] for inp, out", "beam, vocab] flat_logits = flat_logits.squeeze(axis=1) return flat_logits, new_flat_cache # Using", "def create_learning_rate_scheduler( base_learning_rate=0.5, factors='constant * linear_warmup * rsqrt_normalized_decay', warmup_steps=16000, decay_factor=0.5,", "jax.tree_map(jnp.sum, eval_metrics) eval_denominator = eval_metrics_sums.pop('denominator') eval_summary = jax.tree_map( lambda x:", "def pad_examples(x, desired_batch_size): \"\"\"Expand batch to desired size by repeating", "start_step (%s) == finetune_start_step (%s)', start_step, FLAGS.finetune_start_step) assert start_step ==", "create_learning_rate_scheduler( base_learning_rate=FLAGS.lr) else: # Constant LR for finetuning. learning_rate_fn =", "= (f'ios: {ios[n]}\\n\\ntarget: {targets[n]}\\n\\n' f'predicted: {predictions[n]}\\n\\n' f'top of beam:\\n\\n{top_of_beams[n]}\\n\\n') message.append(text)", "between training logs.') flags.DEFINE_integer('eval_freq', 2000, 'Number of steps between eval.')", "summary['perplexity'] = jnp.clip(jnp.exp(summary['loss']), a_max=1.0e4) if jax.host_id() == 0: logging.info('Train in", "array. weights: None or array of shape [batch, length, 1]", "coding=utf-8 # Copyright 2021 The Google Research Authors. 
# #", "flax.metrics import tensorboard from flax.training import checkpoints from flax.training import", "pad_examples(x, desired_batch_size): \"\"\"Expand batch to desired size by repeating last", "use_dropout, decode=not FLAGS.slow_decode), attention_mask_type=FLAGS.attention_mask_type, bos_special_attention=FLAGS.bos_special_attention) rng = jax.random.PRNGKey(FLAGS.seed) rng =", "programs, rngs={'dropout': dropout_rng}) loss, weight_sum = compute_weighted_cross_entropy(logits, programs, weights) mean_loss", "metrics_all = common_utils.get_metrics(metrics_all) lr = metrics_all.pop('learning_rate').mean() metrics_sums = jax.tree_map(jnp.sum, metrics_all)", "cur_pred_batch_size % n_devices: padded_size = int( np.ceil(cur_pred_batch_size / n_devices) *", "else: def tokens_ids_to_logits(flat_ids, flat_cache): \"\"\"Token slice to logits from decoder", "+ 1].astype(np.int32) program = program[program != bos_token] try: return dsl.decode_program(program.tolist(),", "logits.ndim != targets.ndim + 1: raise ValueError('Incorrect shapes. Got shape", "p_outs = [p(inp) for inp in inputs] score = np.sum([p_out", "if jax.host_id() == 0: # Save unreplicated optimizer + model", "os.path.join(FLAGS.save_dir, 'checkpoints', hparam_str), optimizer) # Grab last step. start_step =", "%s logits and %s targets' % (str(logits.shape), str(targets.shape))) acc =", "return beam_seqs # Util functions for prediction # ----------------------------------------------------------------------------- def", "# Handle final odd-sized batch by padding instead of dropping", "== eos_token) + 1].astype(np.int32) program = program[program != bos_token] try:", "p_eval_step = jax.pmap( functools.partial(eval_step, eos_token=eos_token, config=eval_config), axis_name='batch') p_init_cache = jax.pmap(", "enumerate(beams[:-5:-1]): try: decoded_program = decode_program(beam).to_string() except: # pylint: disable=bare-except decoded_program", "os.path.join(FLAGS.save_dir, 'tb', hparam_str)) batch_size = FLAGS.per_device_batch_size * n_devices io_shape =", "train_step, learning_rate_fn=learning_rate_fn, config=train_config), axis_name='batch') p_eval_step = jax.pmap( functools.partial(eval_step, eos_token=eos_token, config=eval_config),", "of scalar loss and batch normalizing factor. \"\"\" if logits.ndim", "time from absl import app from absl import flags from", "-1 # predicted shape [beam_size, length] for beam in predicted[::-1]:", "FLAGS.slow_decode else 'fast' logging.info( 'Prediction time, %s (beam %d): %.4f", "equal to # batch_size * beam_size, where each batch item's", "1: raise ValueError('Incorrect shapes. Got shape %s logits and %s", "of tokens in program.') flags.DEFINE_integer('max_characters', 120, 'Maximum number of characters", "'Number of steps between checkpoint saves.') flags.DEFINE_integer('finetune_start_step', -1, 'Step the", "for k in sorted(hparam_str_dict.keys())]) # Number of local devices for", "token tables. 
def create_learning_rate_scheduler(
    base_learning_rate=0.5,
    factors='constant * linear_warmup * rsqrt_normalized_decay',
    warmup_steps=16000,
    decay_factor=0.5,
    steps_per_decay=50000,
    steps_per_cycle=100000):
  """Creates learning rate schedule.

  Interprets factors in the factors string which can consist of:
  * constant: interpreted as the constant value,
  * linear_warmup: interpreted as linear warmup until warmup_steps,
  * rsqrt_decay: divide by square root of max(1.0, step - warmup_steps),
  * rsqrt_normalized_decay: divide by square root of max(step, warmup_steps),
  * decay_every: Every k steps decay the learning rate by decay_factor,
  * cosine_decay: Cyclic cosine decay, uses steps_per_cycle parameter.

  Args:
    base_learning_rate: float, the starting constant for the lr schedule.
    factors: a string with factors separated by '*' that defines the schedule.
    warmup_steps: how many steps to warm up for in the warmup schedule.
    decay_factor: The amount to decay the learning rate by.
    steps_per_decay: How often to decay the learning rate.
    steps_per_cycle: Steps per cycle when using cosine decay.

  Returns:
    A function learning_rate(step): float -> {'learning_rate': float}, the
    step-dependent lr.
  """
  factors = [n.strip() for n in factors.split('*')]

  def step_fn(step):
    """Step to learning rate function."""
    ret = 1.0
    for name in factors:
      if name == 'constant':
        ret *= base_learning_rate
      elif name == 'linear_warmup':
        ret *= jnp.minimum(1.0, step / warmup_steps)
      elif name == 'rsqrt_decay':
        ret /= jnp.sqrt(jnp.maximum(1.0, step - warmup_steps))
      elif name == 'rsqrt_normalized_decay':
        ret *= jnp.sqrt(warmup_steps)
        ret /= jnp.sqrt(jnp.maximum(step, warmup_steps))
      elif name == 'decay_every':
        ret *= (decay_factor**(step // steps_per_decay))
      elif name == 'cosine_decay':
        progress = jnp.maximum(0.0,
                               (step - warmup_steps) / float(steps_per_cycle))
        ret *= jnp.maximum(0.0,
                           0.5 * (1.0 + jnp.cos(jnp.pi * (progress % 1.0))))
      else:
        raise ValueError('Unknown factor %s.' % name)
    return jnp.asarray(ret, dtype=jnp.float32)

  return step_fn
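# Illustrative sketch (an addition, not part of the original script) of how
# the factor string composes. With 'constant * linear_warmup' the rate scales
# linearly from 0 to base_learning_rate over warmup_steps, so at step 8000
# with warmup_steps=16000 the warmup factor is 0.5.
def _example_schedule():
  lr_fn = create_learning_rate_scheduler(
      base_learning_rate=1e-3, factors='constant * linear_warmup')
  for s in [0, 8000, 16000, 32000]:
    print(s, float(lr_fn(s)))  # 0.0, 5e-4, 1e-3, 1e-3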
def compute_weighted_cross_entropy(logits, targets, weights=None):
  """Compute weighted cross entropy and entropy for log probs and targets.

  Args:
    logits: `[batch, length, num_classes]` float array.
    targets: categorical targets `[batch, length]` int array.
    weights: None or array of shape [batch, length, 1]

  Returns:
    Tuple of scalar loss and batch normalizing factor.
  """
  if logits.ndim != targets.ndim + 1:
    raise ValueError('Incorrect shapes. Got shape %s logits and %s targets' %
                     (str(logits.shape), str(targets.shape)))
  onehot_targets = common_utils.onehot(targets, logits.shape[-1])
  loss = -jnp.sum(onehot_targets * nn.log_softmax(logits), axis=-1)
  normalizing_factor = jnp.prod(jnp.asarray(targets.shape))
  if weights is not None:
    loss = loss * weights
    normalizing_factor = weights.sum()
  return loss.sum(), normalizing_factor


def compute_weighted_accuracy(logits, targets, weights=None):
  """Compute weighted accuracy for log probs and targets.

  Args:
    logits: `[batch, length, num_classes]` float array.
    targets: categorical targets `[batch, length]` int array.
    weights: None or array of shape [batch, length, 1]

  Returns:
    Tuple of scalar accuracy and batch normalizing factor.
  """
  if logits.ndim != targets.ndim + 1:
    raise ValueError('Incorrect shapes. Got shape %s logits and %s targets' %
                     (str(logits.shape), str(targets.shape)))
  acc = jnp.equal(jnp.argmax(logits, axis=-1), targets)
  normalizing_factor = jnp.prod(jnp.asarray(targets.shape))
  if weights is not None:
    acc = acc * weights
    normalizing_factor = weights.sum()
  return acc.sum(), normalizing_factor


def compute_metrics(logits, targets, weights):
  """Compute summary metrics."""
  loss, weight_sum = compute_weighted_cross_entropy(logits, targets, weights)
  acc, _ = compute_weighted_accuracy(logits, targets, weights)
  metrics = {
      'loss': loss,
      'accuracy': acc,
      'denominator': weight_sum,
  }
  metrics = jax.lax.psum(metrics, 'batch')
  return metrics
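# Sanity-check sketch for the helpers above (an added example, not original
# code): with all-zero logits the model is uniform over `vocab` classes, so
# the weighted mean loss is exactly log(vocab), and the returned weight sum
# is the normalizer that the metric summaries later divide by.
def _check_metric_helpers():
  batch, length, vocab = 2, 3, 8
  logits = jnp.zeros((batch, length, vocab))   # uniform predictions
  targets = jnp.ones((batch, length), jnp.int32)
  weights = jnp.ones((batch, length))
  loss, denom = compute_weighted_cross_entropy(logits, targets, weights)
  assert jnp.allclose(loss / denom, jnp.log(vocab))  # mean loss == log(8)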
# Train / eval / decode step functions.
# -----------------------------------------------------------------------------


def train_step(optimizer,
               inputs,
               outputs,
               programs,
               learning_rate_fn,
               config,
               dropout_rng):
  """Train on batch of program tasks."""
  # We handle PRNG splitting inside the top pmap, rather
  # than handling it outside in the training loop - doing the
  # latter can add some stalls to the devices.
  dropout_rng, new_dropout_rng = jax.random.split(dropout_rng)

  weights = jnp.where(programs > 0, 1, 0).astype(jnp.float32)

  def loss_fn(params):
    """Loss function used for training."""
    logits = models.DecomposeAttentionTransformer(config).apply(
        {'params': params},
        inputs,
        outputs,
        programs,
        rngs={'dropout': dropout_rng})
    loss, weight_sum = compute_weighted_cross_entropy(logits, programs, weights)
    mean_loss = loss / weight_sum
    return mean_loss, logits

  step = optimizer.state.step
  lr = learning_rate_fn(step)
  grad_fn = jax.value_and_grad(loss_fn, has_aux=True)
  (_, logits), grad = grad_fn(optimizer.target)
  grad = jax.lax.pmean(grad, 'batch')
  new_optimizer = optimizer.apply_gradient(grad, learning_rate=lr)

  # Get metrics.
  metrics = compute_metrics(logits, programs, weights)
  metrics['learning_rate'] = lr
  return new_optimizer, metrics, new_dropout_rng


def eval_step(params, inputs, outputs, programs, eos_token, config):
  """Collect metrics for evaluation during training."""
  weights = jnp.where(
      jnp.logical_and(
          programs > 0,
          jnp.logical_and(programs != config.base_config.bos_token,
                          programs != eos_token)),
      1, 0).astype(jnp.float32)
  logits = models.DecomposeAttentionTransformer(config).apply(
      {'params': params}, inputs, outputs, programs)
  return compute_metrics(logits, programs, weights)


def initialize_cache(inputs, outputs, programs, max_decode_len, config):
  """Initialize a cache for a given input shape and max decode length."""
  target_shape = (programs.shape[0], max_decode_len)
  dtype = config.base_config.dtype
  initial_variables = models.DecomposeAttentionTransformer(config).init(
      jax.random.PRNGKey(0),
      jnp.ones(inputs.shape, dtype),
      jnp.ones(outputs.shape, dtype),
      jnp.ones(target_shape, dtype))
  return initial_variables['cache']


def predict_step(params,
                 inputs,
                 outputs,
                 cache,
                 beam_size,
                 eos_token,
                 max_decode_len,
                 config,
                 slow_decode=True):
  """Predict translation with fast decoding beam search on a batch."""
  # Prepare transformer fast-decoder call for beam search: for beam search, we
  # need to set up our decoder model to handle a batch size equal to
  # batch_size * beam_size, where each batch item's data is expanded in-place
  # rather than tiled.
  flat_encoded = decode.flat_batch_beam_expand(
      models.DecomposeAttentionTransformer(config).apply(
          {'params': params},
          inputs,
          outputs,
          method=models.DecomposeAttentionTransformer.encode),
      beam_size)

  encoded_padding_mask = jnp.where(outputs > 0, 1, 0).astype(jnp.float32)
  flat_encoded_padding_mask = decode.flat_batch_beam_expand(
      encoded_padding_mask, beam_size)

  if slow_decode:
    def tokens_ids_to_logits(flat_ids):
      """Token slice to logits from decoder model."""
      # --> [batch * beam, 1, vocab]
      flat_logits = models.DecomposeAttentionTransformer(config=config).apply(
          {'params': params},
          flat_ids,
          flat_encoded,
          flat_encoded_padding_mask,
          method=models.DecomposeAttentionTransformer.decode)
      return flat_logits
  else:
    def tokens_ids_to_logits(flat_ids, flat_cache):
      """Token slice to logits from decoder model."""
      # --> [batch * beam, 1, vocab]
      flat_logits, new_vars = models.DecomposeAttentionTransformer(
          config=config).apply(
              {'params': params, 'cache': flat_cache},
              flat_ids,
              flat_encoded,
              flat_encoded_padding_mask,
              mutable=['cache'],
              method=models.DecomposeAttentionTransformer.decode)
      new_flat_cache = new_vars['cache']
      # Remove singleton sequence-length dimension:
      # [batch * beam, 1, vocab] --> [batch * beam, vocab]
      flat_logits = flat_logits.squeeze(axis=1)
      return flat_logits, new_flat_cache

  # Using the above-defined single-step decoder function, run a
  # beam search over possible sequences given input encoding.
  beam_seqs, _ = decode.beam_search(
      inputs,
      cache,
      tokens_ids_to_logits,
      beam_size=beam_size,
      alpha=0.6,
      bos_token=config.base_config.bos_token,
      eos_token=eos_token,
      max_decode_len=max_decode_len,
      slow_decode=slow_decode)

  # Beam search returns [n_batch, n_beam, n_length] with beam dimension
  # sorted in increasing order of log-probability.
  return beam_seqs
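# The "expanded in-place rather than tiled" comment in predict_step can be
# pictured with plain numpy. This is a sketch assuming
# decode.flat_batch_beam_expand follows the usual Flax beam-search
# convention; the real helper lives in latent_programmer.decode.
def _flat_batch_beam_expand_sketch(x, beam_size):
  # [batch, ...] -> [batch * beam_size, ...], repeating each item in place:
  # [[1], [2]] with beam_size=2 becomes [[1], [1], [2], [2]],
  # not the tiled [[1], [2], [1], [2]].
  return np.repeat(x, beam_size, axis=0)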
# Util functions for prediction
# -----------------------------------------------------------------------------


def pad_examples(x, desired_batch_size):
  """Expand batch to desired size by repeating last slice."""
  batch_pad = desired_batch_size - x.shape[0]
  tile_dims = [1] * len(x.shape)
  tile_dims[0] = batch_pad
  return np.concatenate([x, np.tile(x[-1], tile_dims)], axis=0)


def tohost(x):
  """Collect batches from all devices to host and flatten batch dimensions."""
  n_device, n_batch, *remaining_dims = x.shape
  return x.reshape((n_device * n_batch,) + tuple(remaining_dims))


def per_host_sum_pmap(in_tree):
  """Execute psum on in_tree's leaves over one device per host."""
  host2devices = collections.defaultdict(list)
  for d in jax.devices():
    host2devices[d.host_id].append(d)
  devices = [host2devices[k][0] for k in host2devices]
  host_psum = jax.pmap(lambda x: jax.lax.psum(x, 'i'), 'i', devices=devices)

  def pre_pmap(xs):
    return jax.tree_map(lambda x: jnp.broadcast_to(x, (1,) + x.shape), xs)

  def post_pmap(xs):
    return jax.tree_map(lambda x: x[0], xs)

  return post_pmap(host_psum(pre_pmap(in_tree)))


def eval_predicted(predicted, inputs, outputs, parse_beam_fn):
  """Evaluate predicted program beams."""
  best_p, best_score = None, -1

  # predicted shape [beam_size, length]
  for beam in predicted[::-1]:
    try:
      p = parse_beam_fn(beam)
      p_outs = [p(inp) for inp in inputs]
      score = np.sum([p_out == out for p_out, out in zip(p_outs, outputs)])
      if score > best_score:
        best_p, best_score = p, score
    except:  # pylint: disable=bare-except
      pass
    if best_score >= len(inputs):  # Found solution.
      break
  return best_p, best_score


def shorten(key):
  splits = key.split('_')
  return ''.join(s[0] for s in splits)
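# Two quick illustrations of the helpers above (added examples, not original
# code). pad_examples repeats the final row so the last odd-sized predict
# batch still shards evenly across devices, and shorten builds the
# abbreviated hyperparameter names used in hparam_str below.
def _util_examples():
  x = np.arange(6).reshape(3, 2)
  assert pad_examples(x, 4).shape == (4, 2)  # row [4, 5] repeated once
  assert shorten('per_device_batch_size') == 'pdbs'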
def main(_):
  tf.enable_v2_behavior()

  tf.random.set_seed(FLAGS.seed)
  np.random.seed(FLAGS.seed)
  random.seed(FLAGS.seed)

  # BOS special attention only makes sense if we are using relative attention
  # and it's not the baseline.
  if FLAGS.bos_special_attention and (not FLAGS.use_relative_attention or
                                      FLAGS.attention_mask_type == 'baseline'):
    raise ValueError(
        "bos_special_attention doesn't work when use_relative_attention={} and "
        'attention_mask_type={}'.format(FLAGS.use_relative_attention,
                                        FLAGS.attention_mask_type))

  hparam_str_dict = dict(seed=FLAGS.seed, lr=FLAGS.lr)
  # Get hyperparameters
  if FLAGS.xm_parameters:
    for key, value in json.loads(FLAGS.xm_parameters).items():
      if key not in hparam_str_dict:
        hparam_str_dict[key] = value

  hparam_str = ','.join(['%s=%s' % (shorten(k), str(hparam_str_dict[k]))
                         for k in sorted(hparam_str_dict.keys())])

  # Number of local devices for this host.
  n_devices = jax.local_device_count()

  if jax.host_id() == 0:
    summary_writer = tensorboard.SummaryWriter(
        os.path.join(FLAGS.save_dir, 'tb', hparam_str))

  batch_size = FLAGS.per_device_batch_size * n_devices
  io_shape = (FLAGS.per_device_batch_size,
              FLAGS.num_strings_per_task,
              FLAGS.max_characters)
  program_shape = (FLAGS.per_device_batch_size, FLAGS.max_program_length)

  # Setup DSL
  # ---------------------------------------------------------------------------

  # Build token tables.
  id_char_table = {i + 1: char for (i, char) in enumerate(dsl.CHARACTER)}
  char_id_table = {char: id for id, char in id_char_table.items()}
  id_token_table, token_id_table = dsl_tokens.build_token_tables()
  io_vocab_size = len(char_id_table) + 1  # For padding.
  program_vocab_size = len(token_id_table) + 1

  bos_token = token_id_table[dsl.BOS]
  eos_token = token_id_table[dsl.EOS]

  # Parse io and program token sequences (for eval).
  def decode_io(inputs, outputs):
    """Decode io examples tokens."""

    def decode_str(s):
      """Decode string tokens."""
      return ''.join([id_char_table[c_id] for c_id in s if c_id > 0])

    inps, outs = [], []
    for inp, out in zip(inputs, outputs):
      inps.append(decode_str(inp))
      outs.append(decode_str(out))
    return inps, outs

  def decode_program(program):
    """Decode program tokens."""
    program = program[:np.argmax(program == eos_token) + 1].astype(np.int32)
    program = program[program != bos_token]
    try:
      return dsl.decode_program(program.tolist(), id_token_table)
    except:  # pylint: disable=bare-except
      return None  # Program does not compile.

  # Load Dataset
  # ---------------------------------------------------------------------------
  logging.info('Loading dataset from %s', FLAGS.dataset_filepattern)
  padded_shapes = (io_shape[1:], io_shape[1:], program_shape[1:])
  logging.info('padded_shapes: %s', padded_shapes)
  dataset = input_pipeline.create_dataset_from_tf_record(
      FLAGS.dataset_filepattern, token_id_table, char_id_table)
  dataset = dataset.padded_batch(
      batch_size,
      padded_shapes=padded_shapes,
      drop_remainder=True)
  # Split evaluation and training.
  eval_ds = dataset.take(FLAGS.num_eval_steps)
  # Decrease batch of predict dataset to handle beam search.
  predict_ds = eval_ds.unbatch().padded_batch(
      int(np.ceil(batch_size / 10)),
      padded_shapes=padded_shapes)
  train_ds = dataset.skip(FLAGS.num_eval_steps).repeat()
  train_iter = train_ds.as_numpy_iterator()

  # Build Model and Optimizer
  # ---------------------------------------------------------------------------
  use_dropout = False
  base_config = base_models.TransformerConfig(
      vocab_size=io_vocab_size,
      output_vocab_size=program_vocab_size,
      shift=True,
      emb_dim=FLAGS.embedding_dim,
      num_heads=FLAGS.num_heads,
      num_layers=FLAGS.num_layers,
      qkv_dim=FLAGS.embedding_dim,
      mlp_dim=FLAGS.hidden_dim,
      max_len=max(FLAGS.max_characters, FLAGS.max_program_length),
      use_relative_attention=FLAGS.use_relative_attention,
      deterministic=not use_dropout,
      decode=False,
      bos_token=bos_token)
  train_config = models.DecomposeAttentionTransformerConfig(
      base_config=base_config,
      attention_mask_type=FLAGS.attention_mask_type,
      bos_special_attention=FLAGS.bos_special_attention)
  eval_config = models.DecomposeAttentionTransformerConfig(
      base_config=base_config.replace(deterministic=not use_dropout),
      attention_mask_type=FLAGS.attention_mask_type,
      bos_special_attention=FLAGS.bos_special_attention)
  predict_config = models.DecomposeAttentionTransformerConfig(
      base_config=base_config.replace(
          shift=False,
          deterministic=not use_dropout,
          decode=not FLAGS.slow_decode),
      attention_mask_type=FLAGS.attention_mask_type,
      bos_special_attention=FLAGS.bos_special_attention)

  rng = jax.random.PRNGKey(FLAGS.seed)
  rng = jax.random.fold_in(rng, jax.host_id())
  rng, init_rng = jax.random.split(rng)

  m = models.DecomposeAttentionTransformer(eval_config)
  initial_variables = jax.jit(m.init)(
      {'params': init_rng, 'dropout': init_rng},
      jnp.ones(io_shape, jnp.float32),
      jnp.ones(io_shape, jnp.float32),
      jnp.ones(program_shape, jnp.float32))

  optimizer_def = optim.Adam(
      FLAGS.lr,
      beta1=0.9,
      beta2=0.98,
      eps=1e-9,
      weight_decay=FLAGS.weight_decay)
  optimizer = optimizer_def.create(initial_variables['params'])

  del initial_variables  # Don't keep a copy of the initial model.

  start_step = 0
  if FLAGS.restore_checkpoints:
    # Restore unreplicated optimizer + model state from last checkpoint.
    optimizer = checkpoints.restore_checkpoint(
        os.path.join(FLAGS.save_dir, 'checkpoints', hparam_str), optimizer)
    # Grab last step.
    start_step = int(optimizer.state.step)
    logging.info('Found model checkpointed at step %d.', start_step)
    if FLAGS.finetune_start_step > 0:
      logging.info('Checking that start_step (%s) == finetune_start_step (%s)',
                   start_step, FLAGS.finetune_start_step)
      assert start_step == FLAGS.finetune_start_step

  # Replicate optimizer.
  optimizer = jax_utils.replicate(optimizer)

  # TODO(jxihong): Implement fast decoding.
  assert FLAGS.slow_decode, 'Fast decoding is not implemented yet.'

  if FLAGS.finetune_start_step <= 0:
    learning_rate_fn = create_learning_rate_scheduler(
        base_learning_rate=FLAGS.lr)
  else:
    # Constant LR for finetuning.
    learning_rate_fn = create_learning_rate_scheduler(
        base_learning_rate=FLAGS.lr, factors='constant')
  p_train_step = jax.pmap(
      functools.partial(
          train_step,
          learning_rate_fn=learning_rate_fn,
          config=train_config),
      axis_name='batch')
  p_eval_step = jax.pmap(
      functools.partial(
          eval_step,
          eos_token=eos_token,
          config=eval_config),
      axis_name='batch')
  p_init_cache = jax.pmap(
      functools.partial(
          initialize_cache,
          max_decode_len=FLAGS.max_program_length,
          config=predict_config),
      axis_name='batch')
  p_pred_step = jax.pmap(
      functools.partial(
          predict_step,
          eos_token=eos_token,
          max_decode_len=FLAGS.max_program_length,
          config=predict_config,
          slow_decode=FLAGS.slow_decode),
      axis_name='batch',
      static_broadcasted_argnums=(4,))
\"\"\" if logits.ndim != targets.ndim + 1:", "1, vocab] flat_logits, new_vars = models.DecomposeAttentionTransformer( config=config).apply( {'params': params, 'cache':", "per second', steps_per_sec, step) for key, val in summary.items(): summary_writer.scalar('train/'", "tokens_ids_to_logits(flat_ids): \"\"\"Token slice to logits from decoder model.\"\"\" # -->", "from latent_programmer.tasks.robust_fill import tokens as dsl_tokens sys.path.append('../../') gfile = tf.io.gfile", "we # need to set up our decoder model to", "flax import linen as nn from flax import optim from", "'The kind of attention mask to use. Options are: baseline,", "weight_sum = compute_weighted_cross_entropy(logits, targets, weights) acc, _ = compute_weighted_accuracy(logits, targets,", "1, 0).astype(jnp.float32) flat_encoded_padding_mask = decode.flat_batch_beam_expand( encoded_padding_mask, beam_size) if slow_decode: def", "config=predict_config, slow_decode=FLAGS.slow_decode), axis_name='batch', static_broadcasted_argnums=(4,)) # Main Train Loop # ---------------------------------------------------------------------------", "eval / decode step functions. # ----------------------------------------------------------------------------- def train_step(optimizer, inputs,", "int( np.ceil(cur_pred_batch_size / n_devices) * n_devices) # pylint: disable=cell-var-from-loop pred_batch", "* len(x.shape) tile_dims[0] = batch_pad return np.concatenate([x, np.tile(x[-1], tile_dims)], axis=0)", "latent_programmer import models as base_models from latent_programmer.decomposition_transformer_attention import decomposition_models as", "initial_variables = models.DecomposeAttentionTransformer(config).init( jax.random.PRNGKey(0), jnp.ones(inputs.shape, dtype), jnp.ones(outputs.shape, dtype), jnp.ones(target_shape, dtype))", "# Train / eval / decode step functions. # -----------------------------------------------------------------------------", "\"\"\"Step to learning rate function.\"\"\" ret = 1.0 for name", "tuple(remaining_dims)) def per_host_sum_pmap(in_tree): \"\"\"Execute psum on in_tree's leaves over one", "for inp, out in zip(inputs, outputs): inps.append(decode_str(inp)) outs.append(decode_str(out)) return inps,", "key, val, step) summary_writer.flush() # Reset metric accumulation for next", "outputs): inps.append(decode_str(inp)) outs.append(decode_str(out)) return inps, outs def decode_program(program): \"\"\"Decode program", "odd-sized batch by padding instead of dropping it. cur_pred_batch_size =", "value in json.loads(FLAGS.xm_parameters).items(): if key not in hparam_str_dict: hparam_str_dict[key] =", "learning_rate_fn = create_learning_rate_scheduler( base_learning_rate=FLAGS.lr, factors='constant') p_train_step = jax.pmap( functools.partial( train_step,", "the schedule. warmup_steps: how many steps to warm up for", "collections import functools import json import os import random import", "optim from flax.metrics import tensorboard from flax.training import checkpoints from", "'tb', hparam_str)) batch_size = FLAGS.per_device_batch_size * n_devices io_shape = (FLAGS.per_device_batch_size,", "Google Research Authors. 
# # Licensed under the Apache License,", "20, 50]: t_inference_start = time.time() pred_acc = 0 pred_denominator =", "[n_batch, n_beam, n_length] with beam dimension # sorted in increasing", "inputs] score = np.sum([p_out == out for p_out, out in", "= False base_config = base_models.TransformerConfig( vocab_size=io_vocab_size, output_vocab_size=program_vocab_size, shift=True, emb_dim=FLAGS.embedding_dim, num_heads=FLAGS.num_heads,", "for beam in beams: try: beams_log.append(decode_program(beam).to_string()) except: # pylint: disable=bare-except", "base_models from latent_programmer.decomposition_transformer_attention import decomposition_models as models from latent_programmer.decomposition_transformer_attention import", "ret *= jnp.sqrt(warmup_steps) ret /= jnp.sqrt(jnp.maximum(step, warmup_steps)) elif name ==", "return np.concatenate([x, np.tile(x[-1], tile_dims)], axis=0) def tohost(x): \"\"\"Collect batches from", "p, score except: # pylint: disable=bare-except pass if best_score >=", "slice to logits from decoder model.\"\"\" # --> [batch *", "logging.info('Gathering training metrics.') metrics_all = common_utils.get_metrics(metrics_all) lr = metrics_all.pop('learning_rate').mean() metrics_sums", "outputs, programs)) pred_denominator += programs.shape[0] for i, beams in enumerate(predicted):", "n in factors.split('*')] def step_fn(step): \"\"\"Step to learning rate function.\"\"\"", "token_id_table, char_id_table) dataset = dataset.padded_batch( batch_size, padded_shapes=padded_shapes, drop_remainder=True) # Split", "metric handling. # Training Metrics if (step and step %", "[] for batches in eval_ds.as_numpy_iterator(): inputs, outputs, programs = common_utils.shard(batches)", "the factors string which can consist of: * constant: interpreted", "return compute_metrics(logits, programs, weights) def initialize_cache(inputs, outputs, programs, max_decode_len, config):", "for the lr schedule. factors: a string with factors separated", "raise ValueError( \"bos_special_attention doesn't work when use_relative_attention={} and \" 'attention_mask_type={}'.format(FLAGS.use_relative_attention,", "= optimizer_def.create(initial_variables['params']) del initial_variables # Don't keep a copy of", "flags.DEFINE_string('dataset_filepattern', None, 'Filepattern for TFRecord dataset.') flags.DEFINE_integer('per_device_batch_size', 16, 'Number of", "sys import time from absl import app from absl import", "Parse io and program token sequences (for eval). def decode_io(inputs,", "= [], [] for inp, out in zip(inputs, outputs): inps.append(decode_str(inp))", "config=predict_config), axis_name='batch') p_pred_step = jax.pmap( functools.partial( predict_step, eos_token=eos_token, max_decode_len=FLAGS.max_program_length, config=predict_config,", "FLAGS.slow_decode else None) predicted = p_pred_step(optimizer.target, inputs, outputs, cache, beam_size)", "(inputs, outputs, programs)) pred_denominator += programs.shape[0] for i, beams in", "float -> {'learning_rate': float}, the step-dependent lr. 
\"\"\" factors =", "xs) def post_pmap(xs): return jax.tree_map(lambda x: x[0], xs) return post_pmap(host_psum(pre_pmap(in_tree)))", "; '.join(map(str, zip(inps, outs)))) targets.append(decode_program(programs[i]).to_string()) try: predictions.append(p.to_string()) except: # pylint:", "step) summary_writer.flush() # Reset metric accumulation for next evaluation cycle.", "steps.') flags.DEFINE_integer('log_freq', 1000, 'Number of steps between training logs.') flags.DEFINE_integer('eval_freq',", "= jax.tree_map( lambda x: x / eval_denominator, # pylint: disable=cell-var-from-loop", "return inps, outs def decode_program(program): \"\"\"Decode program tokens.\"\"\" program =", "encoded_padding_mask, beam_size) if slow_decode: def tokens_ids_to_logits(flat_ids): \"\"\"Token slice to logits", "import tensorboard from flax.training import checkpoints from flax.training import common_utils", "(i, char) in enumerate(dsl.CHARACTER)} char_id_table = {char: id for id,", "Metrics if (step and step % FLAGS.eval_freq == 0) or", "with the License. # You may obtain a copy of", "summary['learning_rate'] = lr # Calculate (clipped) perplexity after averaging log-perplexities:", "checkpoint saves.') flags.DEFINE_integer('finetune_start_step', -1, 'Step the initial checkpoint should start", "and (not FLAGS.use_relative_attention or FLAGS.attention_mask_type == 'baseline'): raise ValueError( \"bos_special_attention", "for key, val in summary.items(): summary_writer.scalar('train/' + key, val, step)", "FLAGS.slow_decode), attention_mask_type=FLAGS.attention_mask_type, bos_special_attention=FLAGS.bos_special_attention) rng = jax.random.PRNGKey(FLAGS.seed) rng = jax.random.fold_in(rng, jax.host_id())", "jnp.prod(jnp.asarray(targets.shape)) if weights is not None: acc = acc *", "entropy for log probs and targets. Args: logits: `[batch, length,", "base_learning_rate=FLAGS.lr) else: # Constant LR for finetuning. learning_rate_fn = create_learning_rate_scheduler(", "= per_host_sum_pmap( jax.tree_map(np.array, (pred_acc, pred_denominator))) # Record beam search results", "None, 'Directory to save results to.') flags.DEFINE_integer('num_train_steps', 2000000, 'Number of", "consist of: * constant: interpreted as the constant value, *", "batches # Handle final odd-sized batch by padding instead of", "- x.shape[0] tile_dims = [1] * len(x.shape) tile_dims[0] = batch_pad", "name) return jnp.asarray(ret, dtype=jnp.float32) return step_fn def compute_weighted_cross_entropy(logits, targets, weights=None):", "step: %d, loss: %.4f', step, summary['loss']) tock = time.time() steps_per_sec", "attention # and it's not the baseline. if FLAGS.bos_special_attention and", "(step and step % FLAGS.log_freq == 0) or is_last_step: logging.info('Gathering", "beams_log = [] for beam in beams: try: beams_log.append(decode_program(beam).to_string()) except:", "} metrics = jax.lax.psum(metrics, 'batch') return metrics # Train /", "dataset from %s', FLAGS.dataset_filepattern) padded_shapes = (io_shape[1:], io_shape[1:], program_shape[1:]) logging.info('padded_shapes:", "targets' % (str(logits.shape), str(targets.shape))) onehot_targets = common_utils.onehot(targets, logits.shape[-1]) loss =", "express or implied. # See the License for the specific", "ValueError( \"bos_special_attention doesn't work when use_relative_attention={} and \" 'attention_mask_type={}'.format(FLAGS.use_relative_attention, FLAGS.attention_mask_type))", "except in compliance with the License. # You may obtain", "rate schedule. 
Interprets factors in the factors string which can", "compute_metrics(logits, targets, weights): \"\"\"Compute summary metrics.\"\"\" loss, weight_sum = compute_weighted_cross_entropy(logits,", "return x.reshape((n_device * n_batch,) + tuple(remaining_dims)) def per_host_sum_pmap(in_tree): \"\"\"Execute psum", "the training loop - doing the # latter can add", "(for eval). def decode_io(inputs, outputs): \"\"\"Decode io examples tokens.\"\"\" def", "# pylint: disable=bare-except beams_log.append('Did not compile') logging.info('predicted beam: %s', '\\n'.join(beams_log))", "in summary.items(): summary_writer.scalar('train/' + key, val, step) summary_writer.flush() # Reset", "outputs): \"\"\"Decode io examples tokens.\"\"\" def decode_str(s): \"\"\"Decode string tokens.\"\"\"", "= metrics_sums.pop('denominator') summary = jax.tree_map( lambda x: x / denominator,", "metric accumulation for next evaluation cycle. metrics_all = [] #", "<= 0: learning_rate_fn = create_learning_rate_scheduler( base_learning_rate=FLAGS.lr) else: # Constant LR", "evaluation cycle. metrics_all = [] # Evaluation Metrics if (step", "bos_special_attention=FLAGS.bos_special_attention) rng = jax.random.PRNGKey(FLAGS.seed) rng = jax.random.fold_in(rng, jax.host_id()) rng, init_rng", "metrics, new_dropout_rng def eval_step(params, inputs, outputs, programs, eos_token, config): \"\"\"Collect", "'Embedding dimension.') flags.DEFINE_integer('hidden_dim', 512, 'Hidden dimension.') flags.DEFINE_integer('num_heads', 4, 'Number of", "Write to tensorboard. if jax.host_id() == 0: slow_or_fast = 'slow'", "for in the warmup schedule. decay_factor: The amount to decay", "# Licensed under the Apache License, Version 2.0 (the \"License\");", "seq-to-seq model on random supervised training tasks.\"\"\" # pytype: disable=wrong-arg-count", "= p, score except: # pylint: disable=bare-except pass if best_score", "decoder model to handle a batch size equal to #", "not compile') logging.info('predicted beam: %s', '\\n'.join(beams_log)) top_of_beam = [] for", "!= config.base_config.bos_token, programs != eos_token)), 1, 0).astype(jnp.float32) logits = models.DecomposeAttentionTransformer(config).apply(", "params, 'cache': flat_cache}, flat_ids, flat_encoded, flat_encoded_padding_mask, mutable=['cache'], method=models.DecomposeAttentionTransformer.decode) new_flat_cache =", "def decode_io(inputs, outputs): \"\"\"Decode io examples tokens.\"\"\" def decode_str(s): \"\"\"Decode", "/ denominator, # pylint: disable=cell-var-from-loop metrics_sums) summary['learning_rate'] = lr #", "prediction?') flags.DEFINE_string('dataset_filepattern', None, 'Filepattern for TFRecord dataset.') flags.DEFINE_integer('per_device_batch_size', 16, 'Number", "over possible sequences given input encoding. beam_seqs, _ = decode.beam_search(", "in jax.devices(): host2devices[d.host_id].append(d) devices = [host2devices[k][0] for k in host2devices]", "program tokens.\"\"\" program = program[:np.argmax(program == eos_token) + 1].astype(np.int32) program", "step / warmup_steps) elif name == 'rsqrt_decay': ret /= jnp.sqrt(jnp.maximum(1.0,", "CONDITIONS OF ANY KIND, either express or implied. # See", "[n.strip() for n in factors.split('*')] def step_fn(step): \"\"\"Step to learning", "use relative positonal embeddings.') flags.DEFINE_bool('bos_special_attention', False, 'Whether to use special", "dropout_rng=dropout_rng) metrics_all.append(metrics) is_last_step = step == FLAGS.num_train_steps - 1 #", "decoding is not implemented yet.' 
if FLAGS.finetune_start_step <= 0: learning_rate_fn", "= common_utils.shard(pred_batch) cache = (p_init_cache(inputs, outputs, programs) if not FLAGS.slow_decode", "import time from absl import app from absl import flags", "\"\"\"Train seq-to-seq model on random supervised training tasks.\"\"\" # pytype:", "weights is not None: loss = loss * weights normalizing_factor", "!= targets.ndim + 1: raise ValueError('Incorrect shapes. Got shape %s", "pred_batch[0].shape[0] if cur_pred_batch_size % n_devices: padded_size = int( np.ceil(cur_pred_batch_size /", "+ 1 # For padding. program_vocab_size = len(token_id_table) + 1", "as base_models from latent_programmer.decomposition_transformer_attention import decomposition_models as models from latent_programmer.decomposition_transformer_attention", "inputs, outputs, programs) eval_metrics.append(metrics) eval_metrics = common_utils.get_metrics(eval_metrics) eval_metrics_sums = jax.tree_map(jnp.sum,", "or is_last_step: logging.info('Gathering beam search metrics.') for beam_size in [1,", "'Maximum number of tokens in program.') flags.DEFINE_integer('max_characters', 120, 'Maximum number", "targets' % (str(logits.shape), str(targets.shape))) acc = jnp.equal(jnp.argmax(logits, axis=-1), targets) normalizing_factor", "FLAGS.use_relative_attention or FLAGS.attention_mask_type == 'baseline'): raise ValueError( \"bos_special_attention doesn't work", "if (step and step % FLAGS.eval_freq == 0) or is_last_step:", "= jax.lax.psum(metrics, 'batch') return metrics # Train / eval /", "mutable=['cache'], method=models.DecomposeAttentionTransformer.decode) new_flat_cache = new_vars['cache'] # Remove singleton sequence-length dimension:", "eval_ds = dataset.take(FLAGS.num_eval_steps) # Decrease batch of predict dataset to", "{}, decoded: {}, tokens: {}'.format( index, decoded_program, beam)) top_of_beams.append('\\n\\n'.join(top_of_beam)) all_pred_acc,", "on random supervised training tasks.\"\"\" # pytype: disable=wrong-arg-count # pytype:", "logits: `[batch, length, num_classes]` float array. targets: categorical targets `[batch,", "jax.numpy as jnp import numpy as np import tensorflow.compat.v2 as", "jax.pmap( functools.partial( train_step, learning_rate_fn=learning_rate_fn, config=train_config), axis_name='batch') p_eval_step = jax.pmap( functools.partial(eval_step,", "'accuracy': acc, 'denominator': weight_sum, } metrics = jax.lax.psum(metrics, 'batch') return", "to warm up for in the warmup schedule. decay_factor: The", "inputs, outputs, programs, learning_rate_fn, config, dropout_rng): \"\"\"Train on batch of", "to save results to.') flags.DEFINE_integer('num_train_steps', 2000000, 'Number of training steps.')", "(decay_factor**(step // steps_per_decay)) elif name == 'cosine_decay': progress = jnp.maximum(0.0,", "lr schedule. factors: a string with factors separated by '*'", "= step == FLAGS.num_train_steps - 1 # Save a Checkpoint", "decay.') flags.DEFINE_integer('embedding_dim', 256, 'Embedding dimension.') flags.DEFINE_integer('hidden_dim', 512, 'Hidden dimension.') flags.DEFINE_integer('num_heads',", "or is_last_step: logging.info('Gathering evaluation metrics.') t_evaluation_start = time.time() eval_metrics =", "= FLAGS.per_device_batch_size * n_devices io_shape = (FLAGS.per_device_batch_size, FLAGS.num_strings_per_task, FLAGS.max_characters) program_shape", "beam:\\n\\n{top_of_beams[n]}\\n\\n') message.append(text) # Write to tensorboard. 
if jax.host_id() == 0:", "as linear warmup until warmup_steps, * rsqrt_decay: divide by square", "FLAGS.log_freq == 0) or is_last_step: logging.info('Gathering training metrics.') metrics_all =", "of shape [batch, length, 1] Returns: Tuple of scalar loss", "in program.') flags.DEFINE_integer('max_characters', 120, 'Maximum number of characters in input/output", "in the warmup schedule. decay_factor: The amount to decay the", "tokens: {}'.format( index, decoded_program, beam)) top_of_beams.append('\\n\\n'.join(top_of_beam)) all_pred_acc, all_pred_denominator = per_host_sum_pmap(", "models.DecomposeAttentionTransformer(eval_config) initial_variables = jax.jit(m.init)( {'params': init_rng, 'dropout': init_rng}, jnp.ones(io_shape, jnp.float32),", "beam search over possible sequences given input encoding. beam_seqs, _", "program_shape = (FLAGS.per_device_batch_size, FLAGS.max_program_length) # Setup DSL # --------------------------------------------------------------------------- #", "step_fn def compute_weighted_cross_entropy(logits, targets, weights=None): \"\"\"Compute weighted cross entropy and", "* weights normalizing_factor = weights.sum() return acc.sum(), normalizing_factor def compute_metrics(logits,", "1 # For padding. program_vocab_size = len(token_id_table) + 1 bos_token", "for TFRecord dataset.') flags.DEFINE_integer('per_device_batch_size', 16, 'Number of program tasks in", "0, 1, 0).astype(jnp.float32) def loss_fn(params): \"\"\"Loss function used for training.\"\"\"", "batches from all devices to host and flatten batch dimensions.\"\"\"", "search metrics. if (step and step % FLAGS.predict_freq == 0)", "'Fixed random seed for training.') flags.DEFINE_float('lr', 1e-3, 'Learning rate.') flags.DEFINE_float('weight_decay',", "for key, val in eval_summary.items(): summary_writer.scalar('eval/' + key, val, step)", "\" 'attention_mask_type={}'.format(FLAGS.use_relative_attention, FLAGS.attention_mask_type)) if not gfile.isdir(FLAGS.save_dir): gfile.makedirs(FLAGS.save_dir) hparam_str_dict = dict(seed=FLAGS.seed,", "json import os import random import sys import time from", "rate by. steps_per_decay: How often to decay the learning rate.", "2021 The Google Research Authors. # # Licensed under the", "steps_per_sec = FLAGS.log_freq / (tock - tick) tick = tock", "input/output strings per task.') flags.DEFINE_integer('max_program_length', 100, 'Maximum number of tokens", "flags from absl import logging from flax import jax_utils from", "pmap, rather # than handling it outside in the training", "if jax.host_id() == 0: summary_writer = tensorboard.SummaryWriter( os.path.join(FLAGS.save_dir, 'tb', hparam_str))", "'batch') return metrics # Train / eval / decode step", "# --------------------------------------------------------------------------- dropout_rng = jax.random.split(rng, jax.local_device_count()) del rng metrics_all =", "os.path.join(FLAGS.save_dir, 'checkpoints', hparam_str), jax_utils.unreplicate(optimizer), step) # Periodic metric handling. #", "eval_summary['loss']) for key, val in eval_summary.items(): summary_writer.scalar('eval/' + key, val,", "that defines the schedule. 
warmup_steps: how many steps to warm", "jax.random.split(dropout_rng) weights = jnp.where(programs > 0, 1, 0).astype(jnp.float32) def loss_fn(params):", "token_id_table[dsl.BOS] eos_token = token_id_table[dsl.EOS] # Parse io and program token", "eos_token=eos_token, config=eval_config), axis_name='batch') p_init_cache = jax.pmap( functools.partial( initialize_cache, max_decode_len=FLAGS.max_program_length, config=predict_config),", "+ key, val, step) summary_writer.flush() # Beam search metrics. if", "= lr return new_optimizer, metrics, new_dropout_rng def eval_step(params, inputs, outputs,", "not gfile.isdir(FLAGS.save_dir): gfile.makedirs(FLAGS.save_dir) hparam_str_dict = dict(seed=FLAGS.seed, lr=FLAGS.lr) # Get hyperparmaters", "return ''.join(s[0] for s in splits) def main(_): tf.enable_v2_behavior() tf.random.set_seed(FLAGS.seed)", "create_learning_rate_scheduler( base_learning_rate=FLAGS.lr, factors='constant') p_train_step = jax.pmap( functools.partial( train_step, learning_rate_fn=learning_rate_fn, config=train_config),", "specifying hyperparamter search.') def create_learning_rate_scheduler( base_learning_rate=0.5, factors='constant * linear_warmup *", "= len(char_id_table) + 1 # For padding. program_vocab_size = len(token_id_table)", "n_devices) # pylint: disable=cell-var-from-loop pred_batch = jax.tree_map( lambda x: pad_examples(x,", "program token sequences (for eval). def decode_io(inputs, outputs): \"\"\"Decode io", "'attention_mask_type={}'.format(FLAGS.use_relative_attention, FLAGS.attention_mask_type)) if not gfile.isdir(FLAGS.save_dir): gfile.makedirs(FLAGS.save_dir) hparam_str_dict = dict(seed=FLAGS.seed, lr=FLAGS.lr)", "* linear_warmup * rsqrt_normalized_decay', warmup_steps=16000, decay_factor=0.5, steps_per_decay=50000, steps_per_cycle=100000): \"\"\"Creates learning", "new_optimizer = optimizer.apply_gradient(grad, learning_rate=lr) # Get metrics. metrics = compute_metrics(logits,", "or FLAGS.attention_mask_type == 'baseline'): raise ValueError( \"bos_special_attention doesn't work when", "'rsqrt_normalized_decay': ret *= jnp.sqrt(warmup_steps) ret /= jnp.sqrt(jnp.maximum(step, warmup_steps)) elif name", "can consist of: * constant: interpreted as the constant value,", "devices for this host. n_devices = jax.local_device_count() if jax.host_id() ==", "pylint: disable=cell-var-from-loop pred_batch = jax.tree_map( lambda x: pad_examples(x, padded_size), pred_batch)", "metrics['learning_rate'] = lr return new_optimizer, metrics, new_dropout_rng def eval_step(params, inputs,", "--> [batch * beam, vocab] flat_logits = flat_logits.squeeze(axis=1) return flat_logits,", "decoding. assert FLAGS.slow_decode, 'Fast decoding is not implemented yet.' if", "= models.DecomposeAttentionTransformer(eval_config) initial_variables = jax.jit(m.init)( {'params': init_rng, 'dropout': init_rng}, jnp.ones(io_shape,", "in eval_summary.items(): summary_writer.scalar('eval/' + key, val, step) summary_writer.flush() # Beam", "raise ValueError('Incorrect shapes. Got shape %s logits and %s targets'", "Implement fast decoding. 
assert FLAGS.slow_decode, 'Fast decoding is not implemented", "targets, weights) metrics = { 'loss': loss, 'accuracy': acc, 'denominator':", "jax.host_id() == 0: logging.info('Train in step: %d, loss: %.4f', step,", "flags.DEFINE_integer('hidden_dim', 512, 'Hidden dimension.') flags.DEFINE_integer('num_heads', 4, 'Number of layers.') flags.DEFINE_integer('num_layers',", "'linear_warmup': ret *= jnp.minimum(1.0, step / warmup_steps) elif name ==", "amount to decay the learning rate by. steps_per_decay: How often", "\"\"\"Decode program tokens.\"\"\" program = program[:np.argmax(program == eos_token) + 1].astype(np.int32)", "'Prediction time, %s (beam %d): %.4f s, step %d, score", "None, 'Filepattern for TFRecord dataset.') flags.DEFINE_integer('per_device_batch_size', 16, 'Number of program", "latter can add some stalls to the devices. dropout_rng, new_dropout_rng", "python3 \"\"\"Train seq-to-seq model on random supervised training tasks.\"\"\" #", "jax.tree_map(jnp.sum, metrics_all) denominator = metrics_sums.pop('denominator') summary = jax.tree_map( lambda x:", "'batch') new_optimizer = optimizer.apply_gradient(grad, learning_rate=lr) # Get metrics. metrics =", "0.5 * (1.0 + jnp.cos(jnp.pi * (progress % 1.0)))) else:", "# and it's not the baseline. if FLAGS.bos_special_attention and (not", "decay the learning rate by. steps_per_decay: How often to decay", "slow_or_fast = 'slow' if FLAGS.slow_decode else 'fast' logging.info( 'Prediction time,", "pred_acc += 1 ios.append(' ; '.join(map(str, zip(inps, outs)))) targets.append(decode_program(programs[i]).to_string()) try:", "axis_name='batch') p_eval_step = jax.pmap( functools.partial(eval_step, eos_token=eos_token, config=eval_config), axis_name='batch') p_init_cache =", "tensorflow.compat.v2 as tf from latent_programmer import decode from latent_programmer import", "jax.host_id() == 0: # Save unreplicated optimizer + model state.", "Loop # --------------------------------------------------------------------------- dropout_rng = jax.random.split(rng, jax.local_device_count()) del rng metrics_all", "{}, tokens: {}'.format( index, decoded_program, beam)) top_of_beams.append('\\n\\n'.join(top_of_beam)) all_pred_acc, all_pred_denominator =", "metrics_sums) summary['learning_rate'] = lr # Calculate (clipped) perplexity after averaging", "dropout_rng = p_train_step( optimizer, inputs, outputs, programs, dropout_rng=dropout_rng) metrics_all.append(metrics) is_last_step", "handling it outside in the training loop - doing the", "collections.defaultdict(list) for d in jax.devices(): host2devices[d.host_id].append(d) devices = [host2devices[k][0] for", "%s', FLAGS.dataset_filepattern) padded_shapes = (io_shape[1:], io_shape[1:], program_shape[1:]) logging.info('padded_shapes: %s', padded_shapes)", "max_decode_len, config, slow_decode=True): \"\"\"Predict translation with fast decoding beam search", "pred_batch) inputs, outputs, programs = common_utils.shard(pred_batch) cache = (p_init_cache(inputs, outputs,", "/ float(steps_per_cycle)) ret *= jnp.maximum(0.0, 0.5 * (1.0 + jnp.cos(jnp.pi", "== 'constant': ret *= base_learning_rate elif name == 'linear_warmup': ret", "from decoder model.\"\"\" # --> [batch * beam, 1, vocab]", "int array. weights: None or array of shape [batch, length,", "strings per task.') flags.DEFINE_integer('max_program_length', 100, 'Maximum number of tokens in", "Returns: Tuple of scalar accuracy and batch normalizing factor. 
\"\"\"", "used for training.\"\"\" logits = models.DecomposeAttentionTransformer(config).apply( {'params': params}, inputs, outputs,", "{'params': params}, flat_ids, flat_encoded, flat_encoded_padding_mask, method=models.DecomposeAttentionTransformer.decode) return flat_logits else: def", "given input shape and max decode length.\"\"\" target_shape = (programs.shape[0],", "dtype = config.base_config.dtype initial_variables = models.DecomposeAttentionTransformer(config).init( jax.random.PRNGKey(0), jnp.ones(inputs.shape, dtype), jnp.ones(outputs.shape,", "Save a Checkpoint if (step % FLAGS.checkpoint_freq == 0 and", "single-step decoder function, run a # beam search over possible", "all_pred_denominator, step) summary_writer.text('samples-{}'.format(beam_size), '\\n------\\n'.join(message), step) summary_writer.flush() if __name__ == '__main__':", "config): \"\"\"Collect metrics for evaluation during training.\"\"\" weights = jnp.where(", "*= jnp.sqrt(warmup_steps) ret /= jnp.sqrt(jnp.maximum(step, warmup_steps)) elif name == 'decay_every':", "Get metrics. metrics = compute_metrics(logits, programs, weights) metrics['learning_rate'] = lr", "FLAGS.per_device_batch_size * n_devices io_shape = (FLAGS.per_device_batch_size, FLAGS.num_strings_per_task, FLAGS.max_characters) program_shape =", "loss_fn(params): \"\"\"Loss function used for training.\"\"\" logits = models.DecomposeAttentionTransformer(config).apply( {'params':", "dropout_rng): \"\"\"Train on batch of program tasks.\"\"\" # We handle", "beams.\"\"\" best_p, best_score = None, -1 # predicted shape [beam_size,", "leaves over one device per host.\"\"\" host2devices = collections.defaultdict(list) for", "id_token_table, token_id_table = dsl_tokens.build_token_tables() io_vocab_size = len(char_id_table) + 1 #", "if slow_decode: def tokens_ids_to_logits(flat_ids): \"\"\"Token slice to logits from decoder", "_ = compute_weighted_accuracy(logits, targets, weights) metrics = { 'loss': loss,", "4, 'Number of input/output strings per task.') flags.DEFINE_integer('max_program_length', 100, 'Maximum", "decode step functions. # ----------------------------------------------------------------------------- def train_step(optimizer, inputs, outputs, programs,", "if score > best_score: best_p, best_score = p, score except:", "functions. # ----------------------------------------------------------------------------- def train_step(optimizer, inputs, outputs, programs, learning_rate_fn, config,", "Decrease batch of predict dataset to handle beam search. 
predict_ds", "learning_rate_fn = create_learning_rate_scheduler( base_learning_rate=FLAGS.lr) else: # Constant LR for finetuning.", "== 0) or is_last_step: logging.info('Gathering evaluation metrics.') t_evaluation_start = time.time()", "optimizer_def = optim.Adam( FLAGS.lr, beta1=0.9, beta2=0.98, eps=1e-9, weight_decay=FLAGS.weight_decay) optimizer =", "targets, weights): \"\"\"Compute summary metrics.\"\"\" loss, weight_sum = compute_weighted_cross_entropy(logits, targets,", "= tensorboard.SummaryWriter( os.path.join(FLAGS.save_dir, 'tb', hparam_str)) batch_size = FLAGS.per_device_batch_size * n_devices", "weights) acc, _ = compute_weighted_accuracy(logits, targets, weights) metrics = {", "nn from flax import optim from flax.metrics import tensorboard from", "of shape [batch, length, 1] Returns: Tuple of scalar accuracy", "to use relative positonal embeddings.') flags.DEFINE_bool('bos_special_attention', False, 'Whether to use", "from absl import flags from absl import logging from flax", "in zip(inputs, outputs): inps.append(decode_str(inp)) outs.append(decode_str(out)) return inps, outs def decode_program(program):", "shorten(key): splits = key.split('_') return ''.join(s[0] for s in splits)", "= collections.defaultdict(list) for d in jax.devices(): host2devices[d.host_id].append(d) devices = [host2devices[k][0]", "Train / eval / decode step functions. # ----------------------------------------------------------------------------- def", "[1, 5, 10, 20, 50]: t_inference_start = time.time() pred_acc =", "and it's not the baseline. if FLAGS.bos_special_attention and (not FLAGS.use_relative_attention", "bos_special_attention=FLAGS.bos_special_attention) eval_config = models.DecomposeAttentionTransformerConfig( base_config=base_config.replace(deterministic=not use_dropout), attention_mask_type=FLAGS.attention_mask_type, bos_special_attention=FLAGS.bos_special_attention) predict_config =", "predict dataset to handle beam search. predict_ds = eval_ds.unbatch().padded_batch( int(np.ceil(batch_size", "id_char_table.items()} id_token_table, token_id_table = dsl_tokens.build_token_tables() io_vocab_size = len(char_id_table) + 1", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "weights normalizing_factor = weights.sum() return loss.sum(), normalizing_factor def compute_weighted_accuracy(logits, targets,", "params}, inputs, outputs, programs, rngs={'dropout': dropout_rng}) loss, weight_sum = compute_weighted_cross_entropy(logits,", "_internal: flags.DEFINE_string('xm_parameters', None, 'String specifying hyperparamter search.') def create_learning_rate_scheduler( base_learning_rate=0.5,", "start at for ' 'finetuning, or -1 if not finetuning.')", "How often to decay the learning rate. 
steps_per_cycle: Steps per", "training steps.') flags.DEFINE_integer('num_eval_steps', 10, 'Number of evaluation steps.') flags.DEFINE_integer('log_freq', 1000,", "axis=-1) normalizing_factor = jnp.prod(jnp.asarray(targets.shape)) if weights is not None: loss", "length.\"\"\" target_shape = (programs.shape[0], max_decode_len) dtype = config.base_config.dtype initial_variables =", "if jax.host_id() == 0: logging.info('Train in step: %d, loss: %.4f',", "absl import app from absl import flags from absl import", "shape %s logits and %s targets' % (str(logits.shape), str(targets.shape))) onehot_targets", "for beam search, we # need to set up our", "eos_token) + 1].astype(np.int32) program = program[program != bos_token] try: return", "= ','.join(['%s=%s' % (shorten(k), str(hparam_str_dict[k])) for k in sorted(hparam_str_dict.keys())]) #", "= metrics_all.pop('learning_rate').mean() metrics_sums = jax.tree_map(jnp.sum, metrics_all) denominator = metrics_sums.pop('denominator') summary", "inputs, outputs, cache, beam_size) predicted = tohost(predicted) inputs, outputs, programs", "initial_variables['cache'] def predict_step(params, inputs, outputs, cache, beam_size, eos_token, max_decode_len, config,", "def compute_weighted_cross_entropy(logits, targets, weights=None): \"\"\"Compute weighted cross entropy and entropy", "0: summary_writer = tensorboard.SummaryWriter( os.path.join(FLAGS.save_dir, 'tb', hparam_str)) batch_size = FLAGS.per_device_batch_size", "if (step and step % FLAGS.predict_freq == 0) or is_last_step:", "beams, inps, outs, parse_beam_fn=decode_program) if p_score >= len(inps): pred_acc +=", "*= jnp.minimum(1.0, step / warmup_steps) elif name == 'rsqrt_decay': ret", "def eval_step(params, inputs, outputs, programs, eos_token, config): \"\"\"Collect metrics for", "outs.append(decode_str(out)) return inps, outs def decode_program(program): \"\"\"Decode program tokens.\"\"\" program", "= {i+1: char for (i, char) in enumerate(dsl.CHARACTER)} char_id_table =", "Found solution. break return best_p, best_score def shorten(key): splits =", "of the initial model. start_step = 0 if FLAGS.restore_checkpoints: #", "model.\"\"\" # --> [batch * beam, 1, vocab] flat_logits, new_vars", "with fast decoding beam search on a batch.\"\"\" # Prepare", "= jax_utils.replicate(optimizer) # TODO(jxihong): Implement fast decoding. assert FLAGS.slow_decode, 'Fast", "not FLAGS.dataset_filepattern: raise ValueError('Must specify filepattern to dataset.') # Training", "beam search. predict_ds = eval_ds.unbatch().padded_batch( int(np.ceil(batch_size / 10)), padded_shapes=padded_shapes) train_ds", "Version 2.0 (the \"License\"); # you may not use this", "[], [], [] for batches in predict_ds.as_numpy_iterator(): pred_batch = batches", "not FLAGS.slow_decode else None) predicted = p_pred_step(optimizer.target, inputs, outputs, cache,", "disable=bare-except decoded_program = 'Did not compile' top_of_beam.append('index: {}, decoded: {},", "jax_utils from flax import linen as nn from flax import", "separated by '*' that defines the schedule. warmup_steps: how many", "axis_name='batch') p_init_cache = jax.pmap( functools.partial( initialize_cache, max_decode_len=FLAGS.max_program_length, config=predict_config), axis_name='batch') p_pred_step", "predict_step, eos_token=eos_token, max_decode_len=FLAGS.max_program_length, config=predict_config, slow_decode=FLAGS.slow_decode), axis_name='batch', static_broadcasted_argnums=(4,)) # Main Train", "sorted in increasing order of log-probability. 
return beam_seqs # Util", "'i', devices=devices) def pre_pmap(xs): return jax.tree_map(lambda x: jnp.broadcast_to(x, (1,) +", "top_of_beam.append('index: {}, decoded: {}, tokens: {}'.format( index, decoded_program, beam)) top_of_beams.append('\\n\\n'.join(top_of_beam))", "program[:np.argmax(program == eos_token) + 1].astype(np.int32) program = program[program != bos_token]", "program[program != bos_token] try: return dsl.decode_program(program.tolist(), id_token_table) except: # pylint:", "factors: a string with factors separated by '*' that defines", "the constant value, * linear_warmup: interpreted as linear warmup until", "of dropping it. cur_pred_batch_size = pred_batch[0].shape[0] if cur_pred_batch_size % n_devices:", "'Whether to use relative positonal embeddings.') flags.DEFINE_bool('bos_special_attention', False, 'Whether to", "flags.DEFINE_integer('finetune_start_step', -1, 'Step the initial checkpoint should start at for", "-1 if not finetuning.') flags.DEFINE_bool('restore_checkpoints', True, 'Whether to restore from", "outputs, programs) if not FLAGS.slow_decode else None) predicted = p_pred_step(optimizer.target,", "= common_utils.onehot(targets, logits.shape[-1]) loss = -jnp.sum(onehot_targets * nn.log_softmax(logits), axis=-1) normalizing_factor", "if (step % FLAGS.checkpoint_freq == 0 and step > 0)", "all_pred_acc, all_pred_denominator = per_host_sum_pmap( jax.tree_map(np.array, (pred_acc, pred_denominator))) # Record beam", "by applicable law or agreed to in writing, software #", "steps_per_cycle: Steps per cycle when using cosine decay. Returns: A", "= jax.local_device_count() if jax.host_id() == 0: summary_writer = tensorboard.SummaryWriter( os.path.join(FLAGS.save_dir,", "n_devices: padded_size = int( np.ceil(cur_pred_batch_size / n_devices) * n_devices) #", "= batches # Handle final odd-sized batch by padding instead", "pylint: disable=cell-var-from-loop metrics_sums) summary['learning_rate'] = lr # Calculate (clipped) perplexity", "to use special relative attention computation for ' 'BOS tokens.')", "Program does not compile. # Load Dataset # --------------------------------------------------------------------------- logging.info('Initializing", "# pylint: disable=cell-var-from-loop eval_metrics_sums) if jax.host_id() == 0: logging.info('Evaluation time:", "a given input shape and max decode length.\"\"\" target_shape =", "*= base_learning_rate elif name == 'linear_warmup': ret *= jnp.minimum(1.0, step", "# latter can add some stalls to the devices. dropout_rng,", "decay_factor. * cosine_decay: Cyclic cosine decay, uses steps_per_cycle parameter. 
Args:", "jax.random.PRNGKey(FLAGS.seed) rng = jax.random.fold_in(rng, jax.host_id()) rng, init_rng = jax.random.split(rng) m", "axis_name='batch', static_broadcasted_argnums=(4,)) # Main Train Loop # --------------------------------------------------------------------------- dropout_rng =", "'Number of evaluation steps.') flags.DEFINE_integer('log_freq', 1000, 'Number of steps between", "FLAGS.predict_freq == 0) or is_last_step: logging.info('Gathering beam search metrics.') for", "10, 'Number of evaluation steps.') flags.DEFINE_integer('log_freq', 1000, 'Number of steps", "gfile.isdir(FLAGS.save_dir): gfile.makedirs(FLAGS.save_dir) hparam_str_dict = dict(seed=FLAGS.seed, lr=FLAGS.lr) # Get hyperparmaters if", "\"\"\"Loss function used for training.\"\"\" logits = models.DecomposeAttentionTransformer(config).apply( {'params': params},", "%s targets' % (str(logits.shape), str(targets.shape))) acc = jnp.equal(jnp.argmax(logits, axis=-1), targets)", "else None) predicted = p_pred_step(optimizer.target, inputs, outputs, cache, beam_size) predicted", "index, decoded_program, beam)) top_of_beams.append('\\n\\n'.join(top_of_beam)) all_pred_acc, all_pred_denominator = per_host_sum_pmap( jax.tree_map(np.array, (pred_acc,", "--------------------------------------------------------------------------- dropout_rng = jax.random.split(rng, jax.local_device_count()) del rng metrics_all = []", "try: beams_log.append(decode_program(beam).to_string()) except: # pylint: disable=bare-except beams_log.append('Did not compile') logging.info('predicted", "'.join(map(str, zip(inps, outs)))) targets.append(decode_program(programs[i]).to_string()) try: predictions.append(p.to_string()) except: # pylint: disable=bare-except", "the devices. dropout_rng, new_dropout_rng = jax.random.split(dropout_rng) weights = jnp.where(programs >", "divide by square root of max(step, warmup_steps) * decay_every: Every", "factors: if name == 'constant': ret *= base_learning_rate elif name", "flags.DEFINE_float('lr', 1e-3, 'Learning rate.') flags.DEFINE_float('weight_decay', 1e-1, 'Decay factor for AdamW-style", "io_shape = (FLAGS.per_device_batch_size, FLAGS.num_strings_per_task, FLAGS.max_characters) program_shape = (FLAGS.per_device_batch_size, FLAGS.max_program_length) #", "length] for beam in predicted[::-1]: try: p = parse_beam_fn(beam) p_outs", "constant value, * linear_warmup: interpreted as linear warmup until warmup_steps,", "% FLAGS.checkpoint_freq == 0 and step > 0) or is_last_step:", "# We handle PRNG splitting inside the top pmap, rather", "start_step, FLAGS.finetune_start_step) assert start_step == FLAGS.finetune_start_step # Replicate optimizer. 
optimizer", "programs = common_utils.shard(next(train_iter)) optimizer, metrics, dropout_rng = p_train_step( optimizer, inputs,", "predicted[::-1]: try: p = parse_beam_fn(beam) p_outs = [p(inp) for inp", "start_step) if FLAGS.finetune_start_step > 0: logging.info('Checking that start_step (%s) ==", "batch_pad return np.concatenate([x, np.tile(x[-1], tile_dims)], axis=0) def tohost(x): \"\"\"Collect batches", "applicable law or agreed to in writing, software # distributed", "\"\"\"Decode io examples tokens.\"\"\" def decode_str(s): \"\"\"Decode string tokens.\"\"\" return", "dimensions.\"\"\" n_device, n_batch, *remaining_dims = x.shape return x.reshape((n_device * n_batch,)", "summary metrics.\"\"\" loss, weight_sum = compute_weighted_cross_entropy(logits, targets, weights) acc, _", "qkv_dim=FLAGS.embedding_dim, mlp_dim=FLAGS.hidden_dim, max_len=max(FLAGS.max_characters, FLAGS.max_program_length), use_relative_attention=FLAGS.use_relative_attention, deterministic=not use_dropout, decode=False, bos_token=bos_token) train_config", "loss = -jnp.sum(onehot_targets * nn.log_softmax(logits), axis=-1) normalizing_factor = jnp.prod(jnp.asarray(targets.shape)) if", "Prepare transformer fast-decoder call for beam search: for beam search,", "# pylint: disable=bare-except pass if best_score >= len(inputs): # Found", "[] for beam in beams: try: beams_log.append(decode_program(beam).to_string()) except: # pylint:", "FLAGS.bos_special_attention and (not FLAGS.use_relative_attention or FLAGS.attention_mask_type == 'baseline'): raise ValueError(", "jax.host_id() == 0: logging.info('Evaluation time: %.4f s step %d, loss:", "layers.') flags.DEFINE_integer('num_layers', 3, 'Number of Transformer heads.') flags.DEFINE_boolean('slow_decode', True, 'Use", "name == 'rsqrt_decay': ret /= jnp.sqrt(jnp.maximum(1.0, step - warmup_steps)) elif", "checkpoint should start at for ' 'finetuning, or -1 if", "None or array of shape [batch, length, 1] Returns: Tuple", "doing the # latter can add some stalls to the", "compute_weighted_cross_entropy(logits, programs, weights) mean_loss = loss / weight_sum return mean_loss,", "'i'), 'i', devices=devices) def pre_pmap(xs): return jax.tree_map(lambda x: jnp.broadcast_to(x, (1,)", "beam_size, eos_token, max_decode_len, config, slow_decode=True): \"\"\"Predict translation with fast decoding", "int(optimizer.state.step) logging.info('Found model checkpointed at step %d.', start_step) if FLAGS.finetune_start_step", "targets.ndim + 1: raise ValueError('Incorrect shapes. Got shape %s logits", "dsl_tokens sys.path.append('../../') gfile = tf.io.gfile FLAGS = flags.FLAGS flags.DEFINE_integer('seed', 0,", "from all devices to host and flatten batch dimensions.\"\"\" n_device,", "for this host. n_devices = jax.local_device_count() if jax.host_id() == 0:", "jax.lax.psum(metrics, 'batch') return metrics # Train / eval / decode", "= jnp.prod(jnp.asarray(targets.shape)) if weights is not None: acc = acc", "tokens_ids_to_logits, beam_size=beam_size, alpha=0.6, bos_token=config.base_config.bos_token, eos_token=eos_token, max_decode_len=max_decode_len, slow_decode=slow_decode) # Beam search", "== 0) or is_last_step: logging.info('Gathering training metrics.') metrics_all = common_utils.get_metrics(metrics_all)", "# For padding. program_vocab_size = len(token_id_table) + 1 bos_token =", "Authors. # # Licensed under the Apache License, Version 2.0", "ValueError('Incorrect shapes. 
Got shape %s logits and %s targets' %", "token_id_table = dsl_tokens.build_token_tables() io_vocab_size = len(char_id_table) + 1 # For", "flags.DEFINE_integer('num_heads', 4, 'Number of layers.') flags.DEFINE_integer('num_layers', 3, 'Number of Transformer", "(FLAGS.per_device_batch_size, FLAGS.num_strings_per_task, FLAGS.max_characters) program_shape = (FLAGS.per_device_batch_size, FLAGS.max_program_length) # Setup DSL", "= time.time() steps_per_sec = FLAGS.log_freq / (tock - tick) tick", "by padding instead of dropping it. cur_pred_batch_size = pred_batch[0].shape[0] if", "by repeating last slice.\"\"\" batch_pad = desired_batch_size - x.shape[0] tile_dims", "{}'.format( index, decoded_program, beam)) top_of_beams.append('\\n\\n'.join(top_of_beam)) all_pred_acc, all_pred_denominator = per_host_sum_pmap( jax.tree_map(np.array,", "step) # Periodic metric handling. # Training Metrics if (step", "= parse_beam_fn(beam) p_outs = [p(inp) for inp in inputs] score", "# You may obtain a copy of the License at", "pred_denominator = 0 ios, targets, predictions, top_of_beams = [], [],", "predicted = tohost(predicted) inputs, outputs, programs = map(tohost, (inputs, outputs,", "k in host2devices] host_psum = jax.pmap(lambda x: jax.lax.psum(x, 'i'), 'i',", "summary_writer.flush() # Reset metric accumulation for next evaluation cycle. metrics_all", "# BOS special attention only makes sense if we are", "the initial model. start_step = 0 if FLAGS.restore_checkpoints: # Restore", "step in range(start_step, FLAGS.num_train_steps): inputs, outputs, programs = common_utils.shard(next(train_iter)) optimizer,", "decode from latent_programmer import models as base_models from latent_programmer.decomposition_transformer_attention import", "dtype), jnp.ones(target_shape, dtype)) return initial_variables['cache'] def predict_step(params, inputs, outputs, cache,", "flags.DEFINE_integer('checkpoint_freq', 50000, 'Number of steps between checkpoint saves.') flags.DEFINE_integer('finetune_start_step', -1,", "for beam in predicted[::-1]: try: p = parse_beam_fn(beam) p_outs =", "compute_weighted_accuracy(logits, targets, weights) metrics = { 'loss': loss, 'accuracy': acc,", "// steps_per_decay)) elif name == 'cosine_decay': progress = jnp.maximum(0.0, (step", "ios[-1]) logging.info('target: %s', targets[-1]) beams_log = [] for beam in", "eval_metrics_sums = jax.tree_map(jnp.sum, eval_metrics) eval_denominator = eval_metrics_sums.pop('denominator') eval_summary = jax.tree_map(", "inps, outs, parse_beam_fn=decode_program) if p_score >= len(inps): pred_acc += 1", "parse_beam_fn): \"\"\"Evaluate predicted program beams.\"\"\" best_p, best_score = None, -1", "np.random.seed(FLAGS.seed) random.seed(FLAGS.seed) # BOS special attention only makes sense if", "metrics_sums = jax.tree_map(jnp.sum, metrics_all) denominator = metrics_sums.pop('denominator') summary = jax.tree_map(", "is not None: acc = acc * weights normalizing_factor =", "'Number of Transformer heads.') flags.DEFINE_boolean('slow_decode', True, 'Use slow decoding for", "# than handling it outside in the training loop -", "0) or is_last_step: logging.info('Gathering training metrics.') metrics_all = common_utils.get_metrics(metrics_all) lr", "beam_size, where each batch item's data is expanded in-place #", "1, 0).astype(jnp.float32) def loss_fn(params): \"\"\"Loss function used for training.\"\"\" logits", "time.time()-t_evaluation_start, step, eval_summary['loss']) for key, val in eval_summary.items(): summary_writer.scalar('eval/' +", "Transformer heads.') 
flags.DEFINE_boolean('slow_decode', True, 'Use slow decoding for prediction?') flags.DEFINE_string('dataset_filepattern',", "(beam search).') flags.DEFINE_integer('checkpoint_freq', 50000, 'Number of steps between checkpoint saves.')", "functools.partial( train_step, learning_rate_fn=learning_rate_fn, config=train_config), axis_name='batch') p_eval_step = jax.pmap( functools.partial(eval_step, eos_token=eos_token,", "logging.info( 'Prediction time, %s (beam %d): %.4f s, step %d,", "= 0 ios, targets, predictions, top_of_beams = [], [], [],", "del initial_variables # Don't keep a copy of the initial", "ios.append(' ; '.join(map(str, zip(inps, outs)))) targets.append(decode_program(programs[i]).to_string()) try: predictions.append(p.to_string()) except: #", "jax.random.fold_in(rng, jax.host_id()) rng, init_rng = jax.random.split(rng) m = models.DecomposeAttentionTransformer(eval_config) initial_variables", "----------------------------------------------------------------------------- def pad_examples(x, desired_batch_size): \"\"\"Expand batch to desired size by", "best_p, best_score = p, score except: # pylint: disable=bare-except pass", "examples tokens.\"\"\" def decode_str(s): \"\"\"Decode string tokens.\"\"\" return ''.join([id_char_table[c_id] for", "summaries. message = [] for n in np.random.choice(np.arange(len(predictions)), 8): text", "/= jnp.sqrt(jnp.maximum(1.0, step - warmup_steps)) elif name == 'rsqrt_normalized_decay': ret", "handle PRNG splitting inside the top pmap, rather # than", "root of max(step, warmup_steps) * decay_every: Every k steps decay", "weight_decay=FLAGS.weight_decay) optimizer = optimizer_def.create(initial_variables['params']) del initial_variables # Don't keep a", "flags.FLAGS flags.DEFINE_integer('seed', 0, 'Fixed random seed for training.') flags.DEFINE_float('lr', 1e-3,", "input/output strings.') flags.DEFINE_string('save_dir', None, 'Directory to save results to.') flags.DEFINE_integer('num_train_steps',", "ValueError('Unknown factor %s.' % name) return jnp.asarray(ret, dtype=jnp.float32) return step_fn", "k in sorted(hparam_str_dict.keys())]) # Number of local devices for this", "n_devices io_shape = (FLAGS.per_device_batch_size, FLAGS.num_strings_per_task, FLAGS.max_characters) program_shape = (FLAGS.per_device_batch_size, FLAGS.max_program_length)", "if key not in hparam_str_dict: hparam_str_dict[key] = value hparam_str =", "vocab] flat_logits = flat_logits.squeeze(axis=1) return flat_logits, new_flat_cache # Using the", "{'params': params}, inputs, outputs, programs, rngs={'dropout': dropout_rng}) loss, weight_sum =", "shape [batch, length, 1] Returns: Tuple of scalar accuracy and", "For padding. program_vocab_size = len(token_id_table) + 1 bos_token = token_id_table[dsl.BOS]", "return initial_variables['cache'] def predict_step(params, inputs, outputs, cache, beam_size, eos_token, max_decode_len,", "a Checkpoint if (step % FLAGS.checkpoint_freq == 0 and step", "absl import logging from flax import jax_utils from flax import", "raise ValueError('Unknown factor %s.' % name) return jnp.asarray(ret, dtype=jnp.float32) return", "step, all_pred_acc / all_pred_denominator) summary_writer.scalar( 'predict-{}/score-{}'.format(slow_or_fast, beam_size), all_pred_acc / all_pred_denominator,", "for p_out, out in zip(p_outs, outputs)]) if score > best_score:", "val in summary.items(): summary_writer.scalar('train/' + key, val, step) summary_writer.flush() #", "\"\"\"Creates learning rate schedule. 
Interprets factors in the factors string", "vocab] --> [batch * beam, vocab] flat_logits = flat_logits.squeeze(axis=1) return", "base_learning_rate: float, the starting constant for the lr schedule. factors:", "= flat_logits.squeeze(axis=1) return flat_logits, new_flat_cache # Using the above-defined single-step", "normalizing_factor = jnp.prod(jnp.asarray(targets.shape)) if weights is not None: loss =", "specify filepattern to dataset.') # Training dataset. logging.info('Loading dataset from", "key, val, step) summary_writer.flush() # Beam search metrics. if (step", "x: pad_examples(x, padded_size), pred_batch) inputs, outputs, programs = common_utils.shard(pred_batch) cache", "\"License\"); # you may not use this file except in", "1 ios.append(' ; '.join(map(str, zip(inps, outs)))) targets.append(decode_program(programs[i]).to_string()) try: predictions.append(p.to_string()) except:", "import decomposition_models as models from latent_programmer.decomposition_transformer_attention import input_pipeline from latent_programmer.tasks.robust_fill", "dropout_rng}) loss, weight_sum = compute_weighted_cross_entropy(logits, programs, weights) mean_loss = loss", "outputs, programs = common_utils.shard(next(train_iter)) optimizer, metrics, dropout_rng = p_train_step( optimizer,", "metrics = p_eval_step(optimizer.target, inputs, outputs, programs) eval_metrics.append(metrics) eval_metrics = common_utils.get_metrics(eval_metrics)", "use_relative_attention={} and \" 'attention_mask_type={}'.format(FLAGS.use_relative_attention, FLAGS.attention_mask_type)) if not gfile.isdir(FLAGS.save_dir): gfile.makedirs(FLAGS.save_dir) hparam_str_dict", "tables. id_char_table = {i+1: char for (i, char) in enumerate(dsl.CHARACTER)}", "= jnp.maximum(0.0, (step - warmup_steps) / float(steps_per_cycle)) ret *= jnp.maximum(0.0,", "beam)) top_of_beams.append('\\n\\n'.join(top_of_beam)) all_pred_acc, all_pred_denominator = per_host_sum_pmap( jax.tree_map(np.array, (pred_acc, pred_denominator))) #", "use_dropout), attention_mask_type=FLAGS.attention_mask_type, bos_special_attention=FLAGS.bos_special_attention) predict_config = models.DecomposeAttentionTransformerConfig( base_config=base_config.replace( shift=False, deterministic=not use_dropout,", "beam search results as text summaries. message = [] for", "inputs, outputs, programs = common_utils.shard(next(train_iter)) optimizer, metrics, dropout_rng = p_train_step(", "Returns: A function learning_rate(step): float -> {'learning_rate': float}, the step-dependent", "import dsl from latent_programmer.tasks.robust_fill import tokens as dsl_tokens sys.path.append('../../') gfile", "3, 'Number of Transformer heads.') flags.DEFINE_boolean('slow_decode', True, 'Use slow decoding", "and program token sequences (for eval). def decode_io(inputs, outputs): \"\"\"Decode", "hparam_str), jax_utils.unreplicate(optimizer), step) # Periodic metric handling. # Training Metrics", "The Google Research Authors. 
# # Licensed under the Apache", "flat_logits, new_flat_cache # Using the above-defined single-step decoder function, run", "import decode from latent_programmer import models as base_models from latent_programmer.decomposition_transformer_attention", "json.loads(FLAGS.xm_parameters).items(): if key not in hparam_str_dict: hparam_str_dict[key] = value hparam_str", "returns [n_batch, n_beam, n_length] with beam dimension # sorted in", "step % FLAGS.log_freq == 0) or is_last_step: logging.info('Gathering training metrics.')", "x.reshape((n_device * n_batch,) + tuple(remaining_dims)) def per_host_sum_pmap(in_tree): \"\"\"Execute psum on", "- doing the # latter can add some stalls to", "latent_programmer import decode from latent_programmer import models as base_models from", "eos_token)), 1, 0).astype(jnp.float32) logits = models.DecomposeAttentionTransformer(config).apply( {'params': params}, inputs, outputs,", "encoding. beam_seqs, _ = decode.beam_search( inputs, cache, tokens_ids_to_logits, beam_size=beam_size, alpha=0.6,", "flags.DEFINE_integer('max_program_length', 100, 'Maximum number of tokens in program.') flags.DEFINE_integer('max_characters', 120,", "search on a batch.\"\"\" # Prepare transformer fast-decoder call for", "the License. # python3 \"\"\"Train seq-to-seq model on random supervised", "-1, 'Step the initial checkpoint should start at for '", "[] # Evaluation Metrics if (step and step % FLAGS.eval_freq", "learning rate. steps_per_cycle: Steps per cycle when using cosine decay.", "Research Authors. # # Licensed under the Apache License, Version", "denominator = metrics_sums.pop('denominator') summary = jax.tree_map( lambda x: x /" ]
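# A minimal usage sketch for create_learning_rate_scheduler above; the factor
# string and step values here are illustrative assumptions, not settings from
# the original training runs.
def _demo_lr_schedule():
  lr_fn = create_learning_rate_scheduler(
      base_learning_rate=1e-3,
      factors='constant * linear_warmup * rsqrt_decay',
      warmup_steps=100)
  for s in (0, 50, 100, 10000):
    # Ramps linearly to 1e-3 over the first 100 steps, then decays roughly
    # like 1 / sqrt(s - warmup_steps).
    print(s, float(lr_fn(s)))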
[ "value_mesh = prepare_data(data_pd, parameter) lons_plot, lats_plot, deps_plot = generate_vertical_profile_grids( lon_list,", "np.zeros((hnpts, vnpts)) for ih in range(hnpts): for iv in range(vnpts):", "vmax_round = round(np.max(values), 2) if(vmax_round > np.max(values)): vmax = vmax_round", "_ = lld2xyzr(lat, lon, dep) distance2 = (x_mesh-x)**2+(y_mesh-y)**2+(z_mesh-z)**2 mindistance2 =", "dep2) maxdep = max(dep1, dep2) data_pd = data_pd_raw.loc[(data_pd_raw.lat <= maxlat)", "> minlon) & (data_pd_raw.dep >= mindep) & (data_pd_raw.dep <= maxdep)]", "2) if(vmin_round < np.min(values)): vmin = vmin_round else: vmin =", "@click.command() @click.option('--lon1', required=True, type=float, help=\"lon1\") @click.option('--lon2', required=True, type=float, help=\"lon2\") @click.option('--lat1',", "np.max(values)): vmax = vmax_round else: vmax = vmax_round+0.01 print(vmin, vmax,", "= np.zeros_like(lon_mesh) x_mesh = np.zeros_like(lon_mesh) y_mesh = np.zeros_like(lon_mesh) z_mesh =", "dep)][parameter].values[0] @numba.njit() def lld2xyzr(lat, lon, dep): R_EARTH_KM = 6371.0 r", "phi = lon z = r*cosd(theta) h = r*sind(theta) x", "dep) distance2 = (x_mesh-x)**2+(y_mesh-y)**2+(z_mesh-z)**2 mindistance2 = np.min(distance2) coors = np.where(distance2", "iv in range(vnpts): values[ih, iv] = interp_value( lats_plot[ih], lons_plot[ih], deps_plot[iv],", "np.min(values), vmin_round, vmax_round) plt.contourf(mesh_plot_lat, mesh_plot_dep, values, 101, cmap=plt.cm.seismic_r) v =", "np.zeros_like(lon_mesh) r_mesh = np.zeros_like(lon_mesh) for i in range(dx): for j", "0.01) plt.colorbar(ticks=v, label=\"perturbation\") plt.gca().invert_yaxis() plt.xlabel( f\"latitude(°) between (lon: {lon1}°, lat:", "required=True, type=float, help=\"lon1\") @click.option('--lon2', required=True, type=float, help=\"lon2\") @click.option('--lat1', required=True, type=float,", "= np.meshgrid( lon_list, lat_list, dep_list, indexing=\"ij\") dx, dy, dz =", "minlat = min(lat1, lat2) maxlat = max(lat1, lat2) mindep =", "in range(dy): for k in range(dz): x_mesh[i, j, k], y_mesh[i,", "set(data_pd[\"lat\"]) dep_set = set(data_pd[\"dep\"]) lon_list = sorted(lon_set) lat_list = sorted(lat_set)", "@click.option('--data', required=True, type=str, help=\"the pickle file\") @click.option('--parameter', required=True, type=str, help=\"physicial", "plt.figure() mesh_plot_lat, mesh_plot_dep = np.meshgrid( lats_plot, deps_plot, indexing=\"ij\") # get", "# print(lats_plot[ih], lons_plot[ih], deps_plot[iv], values[ih, iv]) # plotting part plt.figure()", "lld2xyzr( lat_mesh[i, j, k], lon_mesh[i, j, k], dep_mesh[i, j, k])", "set(data_pd[\"dep\"]) lon_list = sorted(lon_set) lat_list = sorted(lat_set) dep_list = sorted(dep_set)", "parameter): return data_pd.loc[(data_pd.lat == lat) & (data_pd.lon == lon) &", "lon_list[1], hnpts) lats = np.linspace(lat_list[0], lat_list[1], hnpts) deps = np.linspace(dep_list[0],", "2) if(vmax_round > np.max(values)): vmax = vmax_round else: vmax =", "x, y, z, _ = lld2xyzr(lat, lon, dep) distance2 =", "= h*cosd(phi) y = h*sind(phi) return (x, y, z, r)", "lat2) maxlat = max(lat1, lat2) mindep = min(dep1, dep2) maxdep", "def lld2xyzr(lat, lon, dep): R_EARTH_KM = 6371.0 r = (R_EARTH_KM-dep)/R_EARTH_KM", "required=True, type=float, help=\"dep2\") @click.option('--data', required=True, type=str, help=\"the pickle file\") @click.option('--parameter',", "return data_pd.loc[(data_pd.lat == lat) & (data_pd.lon == lon) & (data_pd.dep", "lon_mesh, lat_mesh, dep_mesh = np.meshgrid( lon_list, lat_list, dep_list, indexing=\"ij\") dx,", 
"plt.xlabel( f\"latitude(°) between (lon: {lon1}°, lat: {lat1}°) and (lon: {lon2}°,", "plotting part plt.figure() mesh_plot_lat, mesh_plot_dep = np.meshgrid( lats_plot, deps_plot, indexing=\"ij\")", "lats = np.linspace(lat_list[0], lat_list[1], hnpts) deps = np.linspace(dep_list[0], dep_list[1], vnpts)", "= vmin_round else: vmin = vmin_round-0.01 vmax_round = round(np.max(values), 2)", "y, z, r) @numba.njit() def cosd(x): return np.cos(np.deg2rad(x)) @numba.njit() def", "plt import numpy as np import pandas as pd import", "prepare_data(data_pd, parameter) lons_plot, lats_plot, deps_plot = generate_vertical_profile_grids( lon_list, lat_list, dep_list,", "lats_plot, deps_plot = generate_vertical_profile_grids( lon_list, lat_list, dep_list, hnpts, vnpts) values", "data, parameter, hnpts, vnpts): lon_list = [lon1, lon2] lat_list =", "@click.option('--parameter', required=True, type=str, help=\"physicial parameter to plot\") @click.option('--hnpts', required=True, type=int,", "parameter, hnpts, vnpts): lon_list = [lon1, lon2] lat_list = [lat1,", "mindistance2) value = value_mesh[coors[0][0], coors[1][0], coors[2][0]] return value def generate_vertical_profile_grids(lon_list,", "to plot\") @click.option('--hnpts', required=True, type=int, help=\"horizontal npts\") @click.option('--vnpts', required=True, type=int,", "data_pd.loc[(data_pd.lat == lat) & (data_pd.lon == lon) & (data_pd.dep ==", "dep_list[1], vnpts) return lons, lats, deps @click.command() @click.option('--lon1', required=True, type=float,", "min(dep1, dep2) maxdep = max(dep1, dep2) data_pd = data_pd_raw.loc[(data_pd_raw.lat <=", "maxlat = max(lat1, lat2) mindep = min(dep1, dep2) maxdep =", "get_value(data_pd, lat, lon, dep, parameter): return data_pd.loc[(data_pd.lat == lat) &", "< maxlon) & (data_pd_raw.lon > minlon) & (data_pd_raw.dep >= mindep)", "y = h*sind(phi) return (x, y, z, r) @numba.njit() def", "= RegularGridInterpolator( # (x_mesh, y_mesh, z_mesh), value_mesh, method=\"nearest\") # return", "h*sind(phi) return (x, y, z, r) @numba.njit() def cosd(x): return", "# value_func = RegularGridInterpolator( # (x_mesh, y_mesh, z_mesh), value_mesh, method=\"nearest\")", "& ( data_pd_raw.lat >= minlat) & (data_pd_raw.lon < maxlon) &", "= np.zeros_like(lon_mesh) z_mesh = np.zeros_like(lon_mesh) r_mesh = np.zeros_like(lon_mesh) for i", "help=\"lon1\") @click.option('--lon2', required=True, type=float, help=\"lon2\") @click.option('--lat1', required=True, type=float, help=\"lat1\") @click.option('--lat2',", "y_mesh, z_mesh, value_mesh def get_value(data_pd, lat, lon, dep, parameter): return", "lat_list, dep_list, hnpts, vnpts) values = np.zeros((hnpts, vnpts)) for ih", "= np.meshgrid( lats_plot, deps_plot, indexing=\"ij\") # get vmin and vmax", "int(round((row.lon-lon_list[0])/(lon_list[1]-lon_list[0]), 0)) j = int(round((row.lat-lat_list[0])/(lat_list[1]-lat_list[0]), 0)) k = int(round((row.dep-dep_list[0])/(dep_list[1]-dep_list[0]), 0))", "return value def generate_vertical_profile_grids(lon_list, lat_list, dep_list, hnpts, vnpts): lons =", "as np import pandas as pd import click import numba", "[lat1, lat2] dep_list = [dep1, dep2] data_pd_raw = pd.read_pickle(data) #", "mindistance2 = np.min(distance2) coors = np.where(distance2 == mindistance2) value =", "between (lon: {lon1}°, lat: {lat1}°) and (lon: {lon2}°, lat: {lat2}°)\")", "range(vnpts): values[ih, iv] = interp_value( lats_plot[ih], lons_plot[ih], deps_plot[iv], x_mesh, y_mesh,", "lat_list, dep_list, hnpts, vnpts): lons = np.linspace(lon_list[0], lon_list[1], hnpts) lats", 
"vmin = vmin_round else: vmin = vmin_round-0.01 vmax_round = round(np.max(values),", "required=True, type=str, help=\"physicial parameter to plot\") @click.option('--hnpts', required=True, type=int, help=\"horizontal", "set(data_pd[\"lon\"]) lat_set = set(data_pd[\"lat\"]) dep_set = set(data_pd[\"dep\"]) lon_list = sorted(lon_set)", "pd.read_pickle(data) # data_pd is too big minlon = min(lon1, lon2)", "vnpts): lons = np.linspace(lon_list[0], lon_list[1], hnpts) lats = np.linspace(lat_list[0], lat_list[1],", "vnpts) return lons, lats, deps @click.command() @click.option('--lon1', required=True, type=float, help=\"lon1\")", "lon_mesh[i, j, k], dep_mesh[i, j, k]) for index, row in", "= vmax_round else: vmax = vmax_round+0.01 print(vmin, vmax, np.max(values), np.min(values),", "plot\") @click.option('--hnpts', required=True, type=int, help=\"horizontal npts\") @click.option('--vnpts', required=True, type=int, help=\"vertical", "r*cosd(theta) h = r*sind(theta) x = h*cosd(phi) y = h*sind(phi)", "is too big minlon = min(lon1, lon2) maxlon = max(lon1,", "def get_value(data_pd, lat, lon, dep, parameter): return data_pd.loc[(data_pd.lat == lat)", "mindep = min(dep1, dep2) maxdep = max(dep1, dep2) data_pd =", "j, k], lon_mesh[i, j, k], dep_mesh[i, j, k]) for index,", "in range(dz): x_mesh[i, j, k], y_mesh[i, j, k], z_mesh[i, j,", "= min(lat1, lat2) maxlat = max(lat1, lat2) mindep = min(dep1,", "parameter): lon_set = set(data_pd[\"lon\"]) lat_set = set(data_pd[\"lat\"]) dep_set = set(data_pd[\"dep\"])", "if(vmin_round < np.min(values)): vmin = vmin_round else: vmin = vmin_round-0.01", "f\"latitude(°) between (lon: {lon1}°, lat: {lat1}°) and (lon: {lon2}°, lat:", "vmin_round, vmax_round) plt.contourf(mesh_plot_lat, mesh_plot_dep, values, 101, cmap=plt.cm.seismic_r) v = np.arange(vmin,", "generate_vertical_profile_grids( lon_list, lat_list, dep_list, hnpts, vnpts) values = np.zeros((hnpts, vnpts))", "== mindistance2) value = value_mesh[coors[0][0], coors[1][0], coors[2][0]] return value def", "np.shape(lon_mesh) value_mesh = np.zeros_like(lon_mesh) x_mesh = np.zeros_like(lon_mesh) y_mesh = np.zeros_like(lon_mesh)", "@click.option('--vnpts', required=True, type=int, help=\"vertical npts\") def main(lon1, lon2, lat1, lat2,", "# return value_func @numba.njit() def interp_value(lat, lon, dep, x_mesh, y_mesh,", "value_mesh = np.zeros_like(lon_mesh) x_mesh = np.zeros_like(lon_mesh) y_mesh = np.zeros_like(lon_mesh) z_mesh", "y_mesh = np.zeros_like(lon_mesh) z_mesh = np.zeros_like(lon_mesh) r_mesh = np.zeros_like(lon_mesh) for", "dep, x_mesh, y_mesh, z_mesh, value_mesh): x, y, z, _ =", "big minlon = min(lon1, lon2) maxlon = max(lon1, lon2) minlat", "iv]) # plotting part plt.figure() mesh_plot_lat, mesh_plot_dep = np.meshgrid( lats_plot,", "as plt import numpy as np import pandas as pd", "lat_list, dep_list, indexing=\"ij\") dx, dy, dz = np.shape(lon_mesh) value_mesh =", "row in data_pd.iterrows(): i = int(round((row.lon-lon_list[0])/(lon_list[1]-lon_list[0]), 0)) j = int(round((row.lat-lat_list[0])/(lat_list[1]-lat_list[0]),", "y_mesh, z_mesh, value_mesh) # print(lats_plot[ih], lons_plot[ih], deps_plot[iv], values[ih, iv]) #", "help=\"lat1\") @click.option('--lat2', required=True, type=float, help=\"lat2\") @click.option('--dep1', required=True, type=float, help=\"dep1\") @click.option('--dep2',", "help=\"the pickle file\") @click.option('--parameter', required=True, type=str, help=\"physicial parameter to plot\")", "required=True, type=int, help=\"vertical npts\") def main(lon1, lon2, lat1, lat2, dep1,", 
"help=\"lon2\") @click.option('--lat1', required=True, type=float, help=\"lat1\") @click.option('--lat2', required=True, type=float, help=\"lat2\") @click.option('--dep1',", "help=\"vertical npts\") def main(lon1, lon2, lat1, lat2, dep1, dep2, data,", "required=True, type=float, help=\"lon2\") @click.option('--lat1', required=True, type=float, help=\"lat1\") @click.option('--lat2', required=True, type=float,", "lat, lon, dep, parameter): return data_pd.loc[(data_pd.lat == lat) & (data_pd.lon", "sorted(dep_set) lon_mesh, lat_mesh, dep_mesh = np.meshgrid( lon_list, lat_list, dep_list, indexing=\"ij\")", "value_mesh def get_value(data_pd, lat, lon, dep, parameter): return data_pd.loc[(data_pd.lat ==", "0)) j = int(round((row.lat-lat_list[0])/(lat_list[1]-lat_list[0]), 0)) k = int(round((row.dep-dep_list[0])/(dep_list[1]-dep_list[0]), 0)) value_mesh[i,", "lat_list = sorted(lat_set) dep_list = sorted(dep_set) lon_mesh, lat_mesh, dep_mesh =", "interp_value( lats_plot[ih], lons_plot[ih], deps_plot[iv], x_mesh, y_mesh, z_mesh, value_mesh) # print(lats_plot[ih],", "help=\"lat2\") @click.option('--dep1', required=True, type=float, help=\"dep1\") @click.option('--dep2', required=True, type=float, help=\"dep2\") @click.option('--data',", "y_mesh, z_mesh, value_mesh): # value_func = RegularGridInterpolator( # (x_mesh, y_mesh,", "= set(data_pd[\"dep\"]) lon_list = sorted(lon_set) lat_list = sorted(lat_set) dep_list =", "part plt.figure() mesh_plot_lat, mesh_plot_dep = np.meshgrid( lats_plot, deps_plot, indexing=\"ij\") #", "lon2) maxlon = max(lon1, lon2) minlat = min(lat1, lat2) maxlat", "generate_vertical_profile_grids(lon_list, lat_list, dep_list, hnpts, vnpts): lons = np.linspace(lon_list[0], lon_list[1], hnpts)", "z, r) @numba.njit() def cosd(x): return np.cos(np.deg2rad(x)) @numba.njit() def sind(x):", "np.cos(np.deg2rad(x)) @numba.njit() def sind(x): return np.sin(np.deg2rad(x)) # def get_value_func(x_mesh, y_mesh,", "import click import numba def prepare_data(data_pd, parameter): lon_set = set(data_pd[\"lon\"])", "iv] = interp_value( lats_plot[ih], lons_plot[ih], deps_plot[iv], x_mesh, y_mesh, z_mesh, value_mesh)", "y, z, _ = lld2xyzr(lat, lon, dep) distance2 = (x_mesh-x)**2+(y_mesh-y)**2+(z_mesh-z)**2", "get vmin and vmax vmin_round = round(np.min(values), 2) if(vmin_round <", "h*cosd(phi) y = h*sind(phi) return (x, y, z, r) @numba.njit()", "= max(lon1, lon2) minlat = min(lat1, lat2) maxlat = max(lat1,", "lat_mesh, dep_mesh = np.meshgrid( lon_list, lat_list, dep_list, indexing=\"ij\") dx, dy,", "vmax = vmax_round else: vmax = vmax_round+0.01 print(vmin, vmax, np.max(values),", "lat_mesh[i, j, k], lon_mesh[i, j, k], dep_mesh[i, j, k]) for", "type=str, help=\"physicial parameter to plot\") @click.option('--hnpts', required=True, type=int, help=\"horizontal npts\")", "type=float, help=\"lat1\") @click.option('--lat2', required=True, type=float, help=\"lat2\") @click.option('--dep1', required=True, type=float, help=\"dep1\")", "r_mesh = np.zeros_like(lon_mesh) for i in range(dx): for j in", "method=\"nearest\") # return value_func @numba.njit() def interp_value(lat, lon, dep, x_mesh,", "cosd(x): return np.cos(np.deg2rad(x)) @numba.njit() def sind(x): return np.sin(np.deg2rad(x)) # def", "x_mesh, y_mesh, z_mesh, value_mesh = prepare_data(data_pd, parameter) lons_plot, lats_plot, deps_plot", "lon_list = sorted(lon_set) lat_list = sorted(lat_set) dep_list = sorted(dep_set) lon_mesh,", "range(dz): x_mesh[i, j, k], y_mesh[i, j, k], z_mesh[i, j, k],", "(x_mesh, y_mesh, z_mesh), value_mesh, 
method=\"nearest\") # return value_func @numba.njit() def", "vmax, 0.01) plt.colorbar(ticks=v, label=\"perturbation\") plt.gca().invert_yaxis() plt.xlabel( f\"latitude(°) between (lon: {lon1}°,", "type=int, help=\"horizontal npts\") @click.option('--vnpts', required=True, type=int, help=\"vertical npts\") def main(lon1,", "= np.shape(lon_mesh) value_mesh = np.zeros_like(lon_mesh) x_mesh = np.zeros_like(lon_mesh) y_mesh =", "101, cmap=plt.cm.seismic_r) v = np.arange(vmin, vmax, 0.01) plt.colorbar(ticks=v, label=\"perturbation\") plt.gca().invert_yaxis()", "value_func = RegularGridInterpolator( # (x_mesh, y_mesh, z_mesh), value_mesh, method=\"nearest\") #", "type=float, help=\"lon2\") @click.option('--lat1', required=True, type=float, help=\"lat1\") @click.option('--lat2', required=True, type=float, help=\"lat2\")", "max(dep1, dep2) data_pd = data_pd_raw.loc[(data_pd_raw.lat <= maxlat) & ( data_pd_raw.lat", "= [dep1, dep2] data_pd_raw = pd.read_pickle(data) # data_pd is too", "np.meshgrid( lon_list, lat_list, dep_list, indexing=\"ij\") dx, dy, dz = np.shape(lon_mesh)", "mesh_plot_dep = np.meshgrid( lats_plot, deps_plot, indexing=\"ij\") # get vmin and", "lats_plot, deps_plot, indexing=\"ij\") # get vmin and vmax vmin_round =", "lat) & (data_pd.lon == lon) & (data_pd.dep == dep)][parameter].values[0] @numba.njit()", "& (data_pd_raw.lon < maxlon) & (data_pd_raw.lon > minlon) & (data_pd_raw.dep", "dep_mesh[i, j, k]) for index, row in data_pd.iterrows(): i =", "x_mesh[i, j, k], y_mesh[i, j, k], z_mesh[i, j, k], r_mesh[i,", "@numba.njit() def cosd(x): return np.cos(np.deg2rad(x)) @numba.njit() def sind(x): return np.sin(np.deg2rad(x))", "z_mesh, value_mesh = prepare_data(data_pd, parameter) lons_plot, lats_plot, deps_plot = generate_vertical_profile_grids(", "= value_mesh[coors[0][0], coors[1][0], coors[2][0]] return value def generate_vertical_profile_grids(lon_list, lat_list, dep_list,", "= lon z = r*cosd(theta) h = r*sind(theta) x =", "lon, dep, parameter): return data_pd.loc[(data_pd.lat == lat) & (data_pd.lon ==", "plt.colorbar(ticks=v, label=\"perturbation\") plt.gca().invert_yaxis() plt.xlabel( f\"latitude(°) between (lon: {lon1}°, lat: {lat1}°)", "== lon) & (data_pd.dep == dep)][parameter].values[0] @numba.njit() def lld2xyzr(lat, lon,", "z_mesh = np.zeros_like(lon_mesh) r_mesh = np.zeros_like(lon_mesh) for i in range(dx):", "= 90-lat phi = lon z = r*cosd(theta) h =", "@numba.njit() def interp_value(lat, lon, dep, x_mesh, y_mesh, z_mesh, value_mesh): x,", "& (data_pd_raw.lon > minlon) & (data_pd_raw.dep >= mindep) & (data_pd_raw.dep", "(lon: {lon1}°, lat: {lat1}°) and (lon: {lon2}°, lat: {lat2}°)\") plt.ylabel(\"depth(km)\")", "(x_mesh-x)**2+(y_mesh-y)**2+(z_mesh-z)**2 mindistance2 = np.min(distance2) coors = np.where(distance2 == mindistance2) value", "data_pd is too big minlon = min(lon1, lon2) maxlon =", "vmin_round else: vmin = vmin_round-0.01 vmax_round = round(np.max(values), 2) if(vmax_round", "parameter to plot\") @click.option('--hnpts', required=True, type=int, help=\"horizontal npts\") @click.option('--vnpts', required=True,", "dz = np.shape(lon_mesh) value_mesh = np.zeros_like(lon_mesh) x_mesh = np.zeros_like(lon_mesh) y_mesh", "return (x, y, z, r) @numba.njit() def cosd(x): return np.cos(np.deg2rad(x))", "dep_list = [dep1, dep2] data_pd_raw = pd.read_pickle(data) # data_pd is", ">= mindep) & (data_pd_raw.dep <= maxdep)] x_mesh, y_mesh, z_mesh, value_mesh", "vmin_round = round(np.min(values), 2) if(vmin_round < np.min(values)): vmin = vmin_round", "help=\"physicial parameter 
to plot\") @click.option('--hnpts', required=True, type=int, help=\"horizontal npts\") @click.option('--vnpts',", "lat_list[1], hnpts) deps = np.linspace(dep_list[0], dep_list[1], vnpts) return lons, lats,", "@click.option('--lon2', required=True, type=float, help=\"lon2\") @click.option('--lat1', required=True, type=float, help=\"lat1\") @click.option('--lat2', required=True,", "type=float, help=\"dep2\") @click.option('--data', required=True, type=str, help=\"the pickle file\") @click.option('--parameter', required=True,", "np.where(distance2 == mindistance2) value = value_mesh[coors[0][0], coors[1][0], coors[2][0]] return value", "lon z = r*cosd(theta) h = r*sind(theta) x = h*cosd(phi)", "range(dx): for j in range(dy): for k in range(dz): x_mesh[i,", "def prepare_data(data_pd, parameter): lon_set = set(data_pd[\"lon\"]) lat_set = set(data_pd[\"lat\"]) dep_set", "lon_list, lat_list, dep_list, hnpts, vnpts) values = np.zeros((hnpts, vnpts)) for", "lon_list = [lon1, lon2] lat_list = [lat1, lat2] dep_list =", "dy, dz = np.shape(lon_mesh) value_mesh = np.zeros_like(lon_mesh) x_mesh = np.zeros_like(lon_mesh)", "import matplotlib.pyplot as plt import numpy as np import pandas", "def generate_vertical_profile_grids(lon_list, lat_list, dep_list, hnpts, vnpts): lons = np.linspace(lon_list[0], lon_list[1],", "= np.linspace(lon_list[0], lon_list[1], hnpts) lats = np.linspace(lat_list[0], lat_list[1], hnpts) deps", "lon, dep, x_mesh, y_mesh, z_mesh, value_mesh): x, y, z, _", "dep_list, indexing=\"ij\") dx, dy, dz = np.shape(lon_mesh) value_mesh = np.zeros_like(lon_mesh)", "r = (R_EARTH_KM-dep)/R_EARTH_KM theta = 90-lat phi = lon z", "# data_pd is too big minlon = min(lon1, lon2) maxlon", "= vmin_round-0.01 vmax_round = round(np.max(values), 2) if(vmax_round > np.max(values)): vmax", "help=\"dep1\") @click.option('--dep2', required=True, type=float, help=\"dep2\") @click.option('--data', required=True, type=str, help=\"the pickle", "for iv in range(vnpts): values[ih, iv] = interp_value( lats_plot[ih], lons_plot[ih],", "z = r*cosd(theta) h = r*sind(theta) x = h*cosd(phi) y", "z_mesh, value_mesh): x, y, z, _ = lld2xyzr(lat, lon, dep)", "round(np.max(values), 2) if(vmax_round > np.max(values)): vmax = vmax_round else: vmax", "hnpts) deps = np.linspace(dep_list[0], dep_list[1], vnpts) return lons, lats, deps", "x_mesh = np.zeros_like(lon_mesh) y_mesh = np.zeros_like(lon_mesh) z_mesh = np.zeros_like(lon_mesh) r_mesh", "# get vmin and vmax vmin_round = round(np.min(values), 2) if(vmin_round", "{lon2}°, lat: {lat2}°)\") plt.ylabel(\"depth(km)\") plt.show() if __name__ == \"__main__\": main()", "dep_mesh = np.meshgrid( lon_list, lat_list, dep_list, indexing=\"ij\") dx, dy, dz", "= set(data_pd[\"lat\"]) dep_set = set(data_pd[\"dep\"]) lon_list = sorted(lon_set) lat_list =", "dep2) data_pd = data_pd_raw.loc[(data_pd_raw.lat <= maxlat) & ( data_pd_raw.lat >=", "x = h*cosd(phi) y = h*sind(phi) return (x, y, z,", "= round(np.min(values), 2) if(vmin_round < np.min(values)): vmin = vmin_round else:", "as pd import click import numba def prepare_data(data_pd, parameter): lon_set", "return np.sin(np.deg2rad(x)) # def get_value_func(x_mesh, y_mesh, z_mesh, value_mesh): # value_func", "value_mesh) # print(lats_plot[ih], lons_plot[ih], deps_plot[iv], values[ih, iv]) # plotting part", "lat1, lat2, dep1, dep2, data, parameter, hnpts, vnpts): lon_list =", "= r*sind(theta) x = h*cosd(phi) y = h*sind(phi) return (x,", "# def get_value_func(x_mesh, y_mesh, z_mesh, value_mesh): # value_func = RegularGridInterpolator(", "deps = 
np.linspace(dep_list[0], dep_list[1], vnpts) return lons, lats, deps @click.command()", "deps_plot[iv], x_mesh, y_mesh, z_mesh, value_mesh) # print(lats_plot[ih], lons_plot[ih], deps_plot[iv], values[ih,", "lon_set = set(data_pd[\"lon\"]) lat_set = set(data_pd[\"lat\"]) dep_set = set(data_pd[\"dep\"]) lon_list", "deps_plot[iv], values[ih, iv]) # plotting part plt.figure() mesh_plot_lat, mesh_plot_dep =", "theta = 90-lat phi = lon z = r*cosd(theta) h", "max(lat1, lat2) mindep = min(dep1, dep2) maxdep = max(dep1, dep2)", "r_mesh[i, j, k] = lld2xyzr( lat_mesh[i, j, k], lon_mesh[i, j,", "k], y_mesh[i, j, k], z_mesh[i, j, k], r_mesh[i, j, k]", "k], z_mesh[i, j, k], r_mesh[i, j, k] = lld2xyzr( lat_mesh[i,", "j, k], z_mesh[i, j, k], r_mesh[i, j, k] = lld2xyzr(", "np.zeros_like(lon_mesh) for i in range(dx): for j in range(dy): for", "min(lon1, lon2) maxlon = max(lon1, lon2) minlat = min(lat1, lat2)", "lons, lats, deps @click.command() @click.option('--lon1', required=True, type=float, help=\"lon1\") @click.option('--lon2', required=True,", "vnpts)) for ih in range(hnpts): for iv in range(vnpts): values[ih,", "lld2xyzr(lat, lon, dep): R_EARTH_KM = 6371.0 r = (R_EARTH_KM-dep)/R_EARTH_KM theta", "90-lat phi = lon z = r*cosd(theta) h = r*sind(theta)", "= generate_vertical_profile_grids( lon_list, lat_list, dep_list, hnpts, vnpts) values = np.zeros((hnpts,", "maxlon = max(lon1, lon2) minlat = min(lat1, lat2) maxlat =", "maxdep = max(dep1, dep2) data_pd = data_pd_raw.loc[(data_pd_raw.lat <= maxlat) &", "and vmax vmin_round = round(np.min(values), 2) if(vmin_round < np.min(values)): vmin", "round(np.min(values), 2) if(vmin_round < np.min(values)): vmin = vmin_round else: vmin", "= lld2xyzr( lat_mesh[i, j, k], lon_mesh[i, j, k], dep_mesh[i, j,", "= np.linspace(dep_list[0], dep_list[1], vnpts) return lons, lats, deps @click.command() @click.option('--lon1',", "def cosd(x): return np.cos(np.deg2rad(x)) @numba.njit() def sind(x): return np.sin(np.deg2rad(x)) #", "@click.option('--dep1', required=True, type=float, help=\"dep1\") @click.option('--dep2', required=True, type=float, help=\"dep2\") @click.option('--data', required=True,", "i in range(dx): for j in range(dy): for k in", "return lons, lats, deps @click.command() @click.option('--lon1', required=True, type=float, help=\"lon1\") @click.option('--lon2',", "vnpts): lon_list = [lon1, lon2] lat_list = [lat1, lat2] dep_list", "lon, dep) distance2 = (x_mesh-x)**2+(y_mesh-y)**2+(z_mesh-z)**2 mindistance2 = np.min(distance2) coors =", "lat: {lat1}°) and (lon: {lon2}°, lat: {lat2}°)\") plt.ylabel(\"depth(km)\") plt.show() if", "prepare_data(data_pd, parameter): lon_set = set(data_pd[\"lon\"]) lat_set = set(data_pd[\"lat\"]) dep_set =", "j = int(round((row.lat-lat_list[0])/(lat_list[1]-lat_list[0]), 0)) k = int(round((row.dep-dep_list[0])/(dep_list[1]-dep_list[0]), 0)) value_mesh[i, j,", "sorted(lat_set) dep_list = sorted(dep_set) lon_mesh, lat_mesh, dep_mesh = np.meshgrid( lon_list,", "@click.option('--lat1', required=True, type=float, help=\"lat1\") @click.option('--lat2', required=True, type=float, help=\"lat2\") @click.option('--dep1', required=True,", "label=\"perturbation\") plt.gca().invert_yaxis() plt.xlabel( f\"latitude(°) between (lon: {lon1}°, lat: {lat1}°) and", "int(round((row.dep-dep_list[0])/(dep_list[1]-dep_list[0]), 0)) value_mesh[i, j, k] = row[parameter] return x_mesh, y_mesh,", "main(lon1, lon2, lat1, lat2, dep1, dep2, data, parameter, hnpts, vnpts):", "= np.where(distance2 == mindistance2) value = value_mesh[coors[0][0], coors[1][0], 
coors[2][0]] return", "# (x_mesh, y_mesh, z_mesh), value_mesh, method=\"nearest\") # return value_func @numba.njit()", "in range(dx): for j in range(dy): for k in range(dz):", "click import numba def prepare_data(data_pd, parameter): lon_set = set(data_pd[\"lon\"]) lat_set", "= max(lat1, lat2) mindep = min(dep1, dep2) maxdep = max(dep1,", "j, k] = lld2xyzr( lat_mesh[i, j, k], lon_mesh[i, j, k],", "npts\") @click.option('--vnpts', required=True, type=int, help=\"vertical npts\") def main(lon1, lon2, lat1,", "sind(x): return np.sin(np.deg2rad(x)) # def get_value_func(x_mesh, y_mesh, z_mesh, value_mesh): #", "plt.contourf(mesh_plot_lat, mesh_plot_dep, values, 101, cmap=plt.cm.seismic_r) v = np.arange(vmin, vmax, 0.01)", "= np.zeros_like(lon_mesh) y_mesh = np.zeros_like(lon_mesh) z_mesh = np.zeros_like(lon_mesh) r_mesh =", "@click.option('--dep2', required=True, type=float, help=\"dep2\") @click.option('--data', required=True, type=str, help=\"the pickle file\")", "h = r*sind(theta) x = h*cosd(phi) y = h*sind(phi) return", "> np.max(values)): vmax = vmax_round else: vmax = vmax_round+0.01 print(vmin,", "for j in range(dy): for k in range(dz): x_mesh[i, j,", "values = np.zeros((hnpts, vnpts)) for ih in range(hnpts): for iv", "def sind(x): return np.sin(np.deg2rad(x)) # def get_value_func(x_mesh, y_mesh, z_mesh, value_mesh):", "lat2] dep_list = [dep1, dep2] data_pd_raw = pd.read_pickle(data) # data_pd", "= (R_EARTH_KM-dep)/R_EARTH_KM theta = 90-lat phi = lon z =", "np.sin(np.deg2rad(x)) # def get_value_func(x_mesh, y_mesh, z_mesh, value_mesh): # value_func =", "{lat1}°) and (lon: {lon2}°, lat: {lat2}°)\") plt.ylabel(\"depth(km)\") plt.show() if __name__", "vmax_round) plt.contourf(mesh_plot_lat, mesh_plot_dep, values, 101, cmap=plt.cm.seismic_r) v = np.arange(vmin, vmax,", "np.min(values)): vmin = vmin_round else: vmin = vmin_round-0.01 vmax_round =", "vmax, np.max(values), np.min(values), vmin_round, vmax_round) plt.contourf(mesh_plot_lat, mesh_plot_dep, values, 101, cmap=plt.cm.seismic_r)", "vmax_round+0.01 print(vmin, vmax, np.max(values), np.min(values), vmin_round, vmax_round) plt.contourf(mesh_plot_lat, mesh_plot_dep, values,", "hnpts, vnpts) values = np.zeros((hnpts, vnpts)) for ih in range(hnpts):", "maxdep)] x_mesh, y_mesh, z_mesh, value_mesh = prepare_data(data_pd, parameter) lons_plot, lats_plot,", "mesh_plot_lat, mesh_plot_dep = np.meshgrid( lats_plot, deps_plot, indexing=\"ij\") # get vmin", "y_mesh[i, j, k], z_mesh[i, j, k], r_mesh[i, j, k] =", ">= minlat) & (data_pd_raw.lon < maxlon) & (data_pd_raw.lon > minlon)", "<= maxlat) & ( data_pd_raw.lat >= minlat) & (data_pd_raw.lon <", "sorted(lon_set) lat_list = sorted(lat_set) dep_list = sorted(dep_set) lon_mesh, lat_mesh, dep_mesh", "get_value_func(x_mesh, y_mesh, z_mesh, value_mesh): # value_func = RegularGridInterpolator( # (x_mesh,", "dep_list = sorted(dep_set) lon_mesh, lat_mesh, dep_mesh = np.meshgrid( lon_list, lat_list,", "np.zeros_like(lon_mesh) y_mesh = np.zeros_like(lon_mesh) z_mesh = np.zeros_like(lon_mesh) r_mesh = np.zeros_like(lon_mesh)", "return x_mesh, y_mesh, z_mesh, value_mesh def get_value(data_pd, lat, lon, dep,", "def interp_value(lat, lon, dep, x_mesh, y_mesh, z_mesh, value_mesh): x, y,", "lon2, lat1, lat2, dep1, dep2, data, parameter, hnpts, vnpts): lon_list", "data_pd_raw.loc[(data_pd_raw.lat <= maxlat) & ( data_pd_raw.lat >= minlat) & (data_pd_raw.lon", "value def generate_vertical_profile_grids(lon_list, lat_list, dep_list, hnpts, vnpts): lons = np.linspace(lon_list[0],", "lat_set = set(data_pd[\"lat\"]) 
dep_set = set(data_pd[\"dep\"]) lon_list = sorted(lon_set) lat_list", "i = int(round((row.lon-lon_list[0])/(lon_list[1]-lon_list[0]), 0)) j = int(round((row.lat-lat_list[0])/(lat_list[1]-lat_list[0]), 0)) k =", "r) @numba.njit() def cosd(x): return np.cos(np.deg2rad(x)) @numba.njit() def sind(x): return", "required=True, type=float, help=\"dep1\") @click.option('--dep2', required=True, type=float, help=\"dep2\") @click.option('--data', required=True, type=str,", "plt.gca().invert_yaxis() plt.xlabel( f\"latitude(°) between (lon: {lon1}°, lat: {lat1}°) and (lon:", "= round(np.max(values), 2) if(vmax_round > np.max(values)): vmax = vmax_round else:", "k], lon_mesh[i, j, k], dep_mesh[i, j, k]) for index, row", "= [lat1, lat2] dep_list = [dep1, dep2] data_pd_raw = pd.read_pickle(data)", "np.linspace(dep_list[0], dep_list[1], vnpts) return lons, lats, deps @click.command() @click.option('--lon1', required=True,", "coors = np.where(distance2 == mindistance2) value = value_mesh[coors[0][0], coors[1][0], coors[2][0]]", "lats_plot[ih], lons_plot[ih], deps_plot[iv], x_mesh, y_mesh, z_mesh, value_mesh) # print(lats_plot[ih], lons_plot[ih],", "in data_pd.iterrows(): i = int(round((row.lon-lon_list[0])/(lon_list[1]-lon_list[0]), 0)) j = int(round((row.lat-lat_list[0])/(lat_list[1]-lat_list[0]), 0))", "import pandas as pd import click import numba def prepare_data(data_pd,", "lons_plot[ih], deps_plot[iv], x_mesh, y_mesh, z_mesh, value_mesh) # print(lats_plot[ih], lons_plot[ih], deps_plot[iv],", "= np.zeros((hnpts, vnpts)) for ih in range(hnpts): for iv in", "== dep)][parameter].values[0] @numba.njit() def lld2xyzr(lat, lon, dep): R_EARTH_KM = 6371.0", "@numba.njit() def sind(x): return np.sin(np.deg2rad(x)) # def get_value_func(x_mesh, y_mesh, z_mesh,", "ih in range(hnpts): for iv in range(vnpts): values[ih, iv] =", "== lat) & (data_pd.lon == lon) & (data_pd.dep == dep)][parameter].values[0]", "else: vmax = vmax_round+0.01 print(vmin, vmax, np.max(values), np.min(values), vmin_round, vmax_round)", "dep2, data, parameter, hnpts, vnpts): lon_list = [lon1, lon2] lat_list", "z_mesh[i, j, k], r_mesh[i, j, k] = lld2xyzr( lat_mesh[i, j,", "row[parameter] return x_mesh, y_mesh, z_mesh, value_mesh def get_value(data_pd, lat, lon,", "value_mesh, method=\"nearest\") # return value_func @numba.njit() def interp_value(lat, lon, dep,", "k], r_mesh[i, j, k] = lld2xyzr( lat_mesh[i, j, k], lon_mesh[i,", "& (data_pd.dep == dep)][parameter].values[0] @numba.njit() def lld2xyzr(lat, lon, dep): R_EARTH_KM", "r*sind(theta) x = h*cosd(phi) y = h*sind(phi) return (x, y,", "lons_plot, lats_plot, deps_plot = generate_vertical_profile_grids( lon_list, lat_list, dep_list, hnpts, vnpts)", "[lon1, lon2] lat_list = [lat1, lat2] dep_list = [dep1, dep2]", "lon) & (data_pd.dep == dep)][parameter].values[0] @numba.njit() def lld2xyzr(lat, lon, dep):", "values[ih, iv]) # plotting part plt.figure() mesh_plot_lat, mesh_plot_dep = np.meshgrid(", "required=True, type=float, help=\"lat2\") @click.option('--dep1', required=True, type=float, help=\"dep1\") @click.option('--dep2', required=True, type=float,", "= sorted(lon_set) lat_list = sorted(lat_set) dep_list = sorted(dep_set) lon_mesh, lat_mesh,", "values[ih, iv] = interp_value( lats_plot[ih], lons_plot[ih], deps_plot[iv], x_mesh, y_mesh, z_mesh,", "type=float, help=\"lat2\") @click.option('--dep1', required=True, type=float, help=\"dep1\") @click.option('--dep2', required=True, type=float, help=\"dep2\")", "for k in range(dz): x_mesh[i, j, k], y_mesh[i, j, k],", "deps @click.command() 
@click.option('--lon1', required=True, type=float, help=\"lon1\") @click.option('--lon2', required=True, type=float, help=\"lon2\")", "= np.zeros_like(lon_mesh) for i in range(dx): for j in range(dy):", "value_mesh): # value_func = RegularGridInterpolator( # (x_mesh, y_mesh, z_mesh), value_mesh,", "0)) k = int(round((row.dep-dep_list[0])/(dep_list[1]-dep_list[0]), 0)) value_mesh[i, j, k] = row[parameter]", "z_mesh, value_mesh): # value_func = RegularGridInterpolator( # (x_mesh, y_mesh, z_mesh),", "lats, deps @click.command() @click.option('--lon1', required=True, type=float, help=\"lon1\") @click.option('--lon2', required=True, type=float,", "import numpy as np import pandas as pd import click", "deps_plot = generate_vertical_profile_grids( lon_list, lat_list, dep_list, hnpts, vnpts) values =", "k in range(dz): x_mesh[i, j, k], y_mesh[i, j, k], z_mesh[i,", "type=int, help=\"vertical npts\") def main(lon1, lon2, lat1, lat2, dep1, dep2,", "required=True, type=int, help=\"horizontal npts\") @click.option('--vnpts', required=True, type=int, help=\"vertical npts\") def", "y_mesh, z_mesh), value_mesh, method=\"nearest\") # return value_func @numba.njit() def interp_value(lat,", "np.zeros_like(lon_mesh) x_mesh = np.zeros_like(lon_mesh) y_mesh = np.zeros_like(lon_mesh) z_mesh = np.zeros_like(lon_mesh)", "coors[1][0], coors[2][0]] return value def generate_vertical_profile_grids(lon_list, lat_list, dep_list, hnpts, vnpts):", "(data_pd.dep == dep)][parameter].values[0] @numba.njit() def lld2xyzr(lat, lon, dep): R_EARTH_KM =", "= int(round((row.lon-lon_list[0])/(lon_list[1]-lon_list[0]), 0)) j = int(round((row.lat-lat_list[0])/(lat_list[1]-lat_list[0]), 0)) k = int(round((row.dep-dep_list[0])/(dep_list[1]-dep_list[0]),", "@click.option('--hnpts', required=True, type=int, help=\"horizontal npts\") @click.option('--vnpts', required=True, type=int, help=\"vertical npts\")", "np.max(values), np.min(values), vmin_round, vmax_round) plt.contourf(mesh_plot_lat, mesh_plot_dep, values, 101, cmap=plt.cm.seismic_r) v", "def get_value_func(x_mesh, y_mesh, z_mesh, value_mesh): # value_func = RegularGridInterpolator( #", "help=\"horizontal npts\") @click.option('--vnpts', required=True, type=int, help=\"vertical npts\") def main(lon1, lon2,", "x_mesh, y_mesh, z_mesh, value_mesh def get_value(data_pd, lat, lon, dep, parameter):", "lat2) mindep = min(dep1, dep2) maxdep = max(dep1, dep2) data_pd", "minlon = min(lon1, lon2) maxlon = max(lon1, lon2) minlat =", "def main(lon1, lon2, lat1, lat2, dep1, dep2, data, parameter, hnpts,", "if(vmax_round > np.max(values)): vmax = vmax_round else: vmax = vmax_round+0.01", "j, k] = row[parameter] return x_mesh, y_mesh, z_mesh, value_mesh def", "= 6371.0 r = (R_EARTH_KM-dep)/R_EARTH_KM theta = 90-lat phi =", "(x, y, z, r) @numba.njit() def cosd(x): return np.cos(np.deg2rad(x)) @numba.njit()", "= int(round((row.lat-lat_list[0])/(lat_list[1]-lat_list[0]), 0)) k = int(round((row.dep-dep_list[0])/(dep_list[1]-dep_list[0]), 0)) value_mesh[i, j, k]", "mindep) & (data_pd_raw.dep <= maxdep)] x_mesh, y_mesh, z_mesh, value_mesh =", "= pd.read_pickle(data) # data_pd is too big minlon = min(lon1,", "max(lon1, lon2) minlat = min(lat1, lat2) maxlat = max(lat1, lat2)", "= min(lon1, lon2) maxlon = max(lon1, lon2) minlat = min(lat1,", "maxlon) & (data_pd_raw.lon > minlon) & (data_pd_raw.dep >= mindep) &", "np.linspace(lat_list[0], lat_list[1], hnpts) deps = np.linspace(dep_list[0], dep_list[1], vnpts) return lons,", "index, row in data_pd.iterrows(): i = 
int(round((row.lon-lon_list[0])/(lon_list[1]-lon_list[0]), 0)) j =", "cmap=plt.cm.seismic_r) v = np.arange(vmin, vmax, 0.01) plt.colorbar(ticks=v, label=\"perturbation\") plt.gca().invert_yaxis() plt.xlabel(", "( data_pd_raw.lat >= minlat) & (data_pd_raw.lon < maxlon) & (data_pd_raw.lon", "z_mesh, value_mesh def get_value(data_pd, lat, lon, dep, parameter): return data_pd.loc[(data_pd.lat", "k] = lld2xyzr( lat_mesh[i, j, k], lon_mesh[i, j, k], dep_mesh[i,", "x_mesh, y_mesh, z_mesh, value_mesh) # print(lats_plot[ih], lons_plot[ih], deps_plot[iv], values[ih, iv])", "np import pandas as pd import click import numba def", "file\") @click.option('--parameter', required=True, type=str, help=\"physicial parameter to plot\") @click.option('--hnpts', required=True,", "npts\") def main(lon1, lon2, lat1, lat2, dep1, dep2, data, parameter,", "distance2 = (x_mesh-x)**2+(y_mesh-y)**2+(z_mesh-z)**2 mindistance2 = np.min(distance2) coors = np.where(distance2 ==", "k]) for index, row in data_pd.iterrows(): i = int(round((row.lon-lon_list[0])/(lon_list[1]-lon_list[0]), 0))", "(data_pd_raw.dep >= mindep) & (data_pd_raw.dep <= maxdep)] x_mesh, y_mesh, z_mesh,", "type=float, help=\"dep1\") @click.option('--dep2', required=True, type=float, help=\"dep2\") @click.option('--data', required=True, type=str, help=\"the", "= row[parameter] return x_mesh, y_mesh, z_mesh, value_mesh def get_value(data_pd, lat,", "x_mesh, y_mesh, z_mesh, value_mesh): x, y, z, _ = lld2xyzr(lat,", "= h*sind(phi) return (x, y, z, r) @numba.njit() def cosd(x):", "# plotting part plt.figure() mesh_plot_lat, mesh_plot_dep = np.meshgrid( lats_plot, deps_plot,", "vmax vmin_round = round(np.min(values), 2) if(vmin_round < np.min(values)): vmin =", "print(lats_plot[ih], lons_plot[ih], deps_plot[iv], values[ih, iv]) # plotting part plt.figure() mesh_plot_lat,", "lat2, dep1, dep2, data, parameter, hnpts, vnpts): lon_list = [lon1,", "help=\"dep2\") @click.option('--data', required=True, type=str, help=\"the pickle file\") @click.option('--parameter', required=True, type=str,", "& (data_pd_raw.dep <= maxdep)] x_mesh, y_mesh, z_mesh, value_mesh = prepare_data(data_pd,", "range(dy): for k in range(dz): x_mesh[i, j, k], y_mesh[i, j,", "return np.cos(np.deg2rad(x)) @numba.njit() def sind(x): return np.sin(np.deg2rad(x)) # def get_value_func(x_mesh,", "= [lon1, lon2] lat_list = [lat1, lat2] dep_list = [dep1,", "j, k]) for index, row in data_pd.iterrows(): i = int(round((row.lon-lon_list[0])/(lon_list[1]-lon_list[0]),", "np.arange(vmin, vmax, 0.01) plt.colorbar(ticks=v, label=\"perturbation\") plt.gca().invert_yaxis() plt.xlabel( f\"latitude(°) between (lon:", "z_mesh), value_mesh, method=\"nearest\") # return value_func @numba.njit() def interp_value(lat, lon,", "in range(vnpts): values[ih, iv] = interp_value( lats_plot[ih], lons_plot[ih], deps_plot[iv], x_mesh,", "pandas as pd import click import numba def prepare_data(data_pd, parameter):", "= min(dep1, dep2) maxdep = max(dep1, dep2) data_pd = data_pd_raw.loc[(data_pd_raw.lat", "v = np.arange(vmin, vmax, 0.01) plt.colorbar(ticks=v, label=\"perturbation\") plt.gca().invert_yaxis() plt.xlabel( f\"latitude(°)", "j, k], r_mesh[i, j, k] = lld2xyzr( lat_mesh[i, j, k],", "= prepare_data(data_pd, parameter) lons_plot, lats_plot, deps_plot = generate_vertical_profile_grids( lon_list, lat_list,", "vmin = vmin_round-0.01 vmax_round = round(np.max(values), 2) if(vmax_round > np.max(values)):", "@numba.njit() def lld2xyzr(lat, lon, dep): R_EARTH_KM = 6371.0 r =", "for index, row in data_pd.iterrows(): i = 
int(round((row.lon-lon_list[0])/(lon_list[1]-lon_list[0]), 0)) j", "numba def prepare_data(data_pd, parameter): lon_set = set(data_pd[\"lon\"]) lat_set = set(data_pd[\"lat\"])", "0)) value_mesh[i, j, k] = row[parameter] return x_mesh, y_mesh, z_mesh,", "values, 101, cmap=plt.cm.seismic_r) v = np.arange(vmin, vmax, 0.01) plt.colorbar(ticks=v, label=\"perturbation\")", "@click.option('--lat2', required=True, type=float, help=\"lat2\") @click.option('--dep1', required=True, type=float, help=\"dep1\") @click.option('--dep2', required=True,", "mesh_plot_dep, values, 101, cmap=plt.cm.seismic_r) v = np.arange(vmin, vmax, 0.01) plt.colorbar(ticks=v,", "= np.zeros_like(lon_mesh) r_mesh = np.zeros_like(lon_mesh) for i in range(dx): for", "range(hnpts): for iv in range(vnpts): values[ih, iv] = interp_value( lats_plot[ih],", "dep1, dep2, data, parameter, hnpts, vnpts): lon_list = [lon1, lon2]", "= sorted(dep_set) lon_mesh, lat_mesh, dep_mesh = np.meshgrid( lon_list, lat_list, dep_list,", "lons = np.linspace(lon_list[0], lon_list[1], hnpts) lats = np.linspace(lat_list[0], lat_list[1], hnpts)", "k] = row[parameter] return x_mesh, y_mesh, z_mesh, value_mesh def get_value(data_pd,", "dep2] data_pd_raw = pd.read_pickle(data) # data_pd is too big minlon", "lons_plot[ih], deps_plot[iv], values[ih, iv]) # plotting part plt.figure() mesh_plot_lat, mesh_plot_dep", "vmin_round-0.01 vmax_round = round(np.max(values), 2) if(vmax_round > np.max(values)): vmax =", "@click.option('--lon1', required=True, type=float, help=\"lon1\") @click.option('--lon2', required=True, type=float, help=\"lon2\") @click.option('--lat1', required=True,", "hnpts) lats = np.linspace(lat_list[0], lat_list[1], hnpts) deps = np.linspace(dep_list[0], dep_list[1],", "= r*cosd(theta) h = r*sind(theta) x = h*cosd(phi) y =", "return value_func @numba.njit() def interp_value(lat, lon, dep, x_mesh, y_mesh, z_mesh,", "indexing=\"ij\") # get vmin and vmax vmin_round = round(np.min(values), 2)", "(data_pd_raw.dep <= maxdep)] x_mesh, y_mesh, z_mesh, value_mesh = prepare_data(data_pd, parameter)", "= set(data_pd[\"lon\"]) lat_set = set(data_pd[\"lat\"]) dep_set = set(data_pd[\"dep\"]) lon_list =", "interp_value(lat, lon, dep, x_mesh, y_mesh, z_mesh, value_mesh): x, y, z,", "type=float, help=\"lon1\") @click.option('--lon2', required=True, type=float, help=\"lon2\") @click.option('--lat1', required=True, type=float, help=\"lat1\")", "6371.0 r = (R_EARTH_KM-dep)/R_EARTH_KM theta = 90-lat phi = lon", "z, _ = lld2xyzr(lat, lon, dep) distance2 = (x_mesh-x)**2+(y_mesh-y)**2+(z_mesh-z)**2 mindistance2", "value_func @numba.njit() def interp_value(lat, lon, dep, x_mesh, y_mesh, z_mesh, value_mesh):", "for ih in range(hnpts): for iv in range(vnpts): values[ih, iv]", "= max(dep1, dep2) data_pd = data_pd_raw.loc[(data_pd_raw.lat <= maxlat) & (", "coors[2][0]] return value def generate_vertical_profile_grids(lon_list, lat_list, dep_list, hnpts, vnpts): lons", "= vmax_round+0.01 print(vmin, vmax, np.max(values), np.min(values), vmin_round, vmax_round) plt.contourf(mesh_plot_lat, mesh_plot_dep,", "j, k], dep_mesh[i, j, k]) for index, row in data_pd.iterrows():", "type=str, help=\"the pickle file\") @click.option('--parameter', required=True, type=str, help=\"physicial parameter to", "dx, dy, dz = np.shape(lon_mesh) value_mesh = np.zeros_like(lon_mesh) x_mesh =", "maxlat) & ( data_pd_raw.lat >= minlat) & (data_pd_raw.lon < maxlon)", "deps_plot, indexing=\"ij\") # get vmin and vmax vmin_round = round(np.min(values),", "too big minlon = min(lon1, lon2) maxlon = 
max(lon1, lon2)", "value_mesh[coors[0][0], coors[1][0], coors[2][0]] return value def generate_vertical_profile_grids(lon_list, lat_list, dep_list, hnpts,", "dep_list, hnpts, vnpts): lons = np.linspace(lon_list[0], lon_list[1], hnpts) lats =", "lon, dep): R_EARTH_KM = 6371.0 r = (R_EARTH_KM-dep)/R_EARTH_KM theta =", "RegularGridInterpolator( # (x_mesh, y_mesh, z_mesh), value_mesh, method=\"nearest\") # return value_func", "np.zeros_like(lon_mesh) z_mesh = np.zeros_like(lon_mesh) r_mesh = np.zeros_like(lon_mesh) for i in", "import numba def prepare_data(data_pd, parameter): lon_set = set(data_pd[\"lon\"]) lat_set =", "data_pd.iterrows(): i = int(round((row.lon-lon_list[0])/(lon_list[1]-lon_list[0]), 0)) j = int(round((row.lat-lat_list[0])/(lat_list[1]-lat_list[0]), 0)) k", "matplotlib.pyplot as plt import numpy as np import pandas as", "lon_list, lat_list, dep_list, indexing=\"ij\") dx, dy, dz = np.shape(lon_mesh) value_mesh", "numpy as np import pandas as pd import click import", "pickle file\") @click.option('--parameter', required=True, type=str, help=\"physicial parameter to plot\") @click.option('--hnpts',", "(lon: {lon2}°, lat: {lat2}°)\") plt.ylabel(\"depth(km)\") plt.show() if __name__ == \"__main__\":", "np.linspace(lon_list[0], lon_list[1], hnpts) lats = np.linspace(lat_list[0], lat_list[1], hnpts) deps =", "in range(hnpts): for iv in range(vnpts): values[ih, iv] = interp_value(", "<= maxdep)] x_mesh, y_mesh, z_mesh, value_mesh = prepare_data(data_pd, parameter) lons_plot,", "minlon) & (data_pd_raw.dep >= mindep) & (data_pd_raw.dep <= maxdep)] x_mesh,", "np.min(distance2) coors = np.where(distance2 == mindistance2) value = value_mesh[coors[0][0], coors[1][0],", "k = int(round((row.dep-dep_list[0])/(dep_list[1]-dep_list[0]), 0)) value_mesh[i, j, k] = row[parameter] return", "y_mesh, z_mesh, value_mesh = prepare_data(data_pd, parameter) lons_plot, lats_plot, deps_plot =", "required=True, type=float, help=\"lat1\") @click.option('--lat2', required=True, type=float, help=\"lat2\") @click.option('--dep1', required=True, type=float,", "= data_pd_raw.loc[(data_pd_raw.lat <= maxlat) & ( data_pd_raw.lat >= minlat) &", "data_pd = data_pd_raw.loc[(data_pd_raw.lat <= maxlat) & ( data_pd_raw.lat >= minlat)", "dep, parameter): return data_pd.loc[(data_pd.lat == lat) & (data_pd.lon == lon)", "np.meshgrid( lats_plot, deps_plot, indexing=\"ij\") # get vmin and vmax vmin_round", "= lld2xyzr(lat, lon, dep) distance2 = (x_mesh-x)**2+(y_mesh-y)**2+(z_mesh-z)**2 mindistance2 = np.min(distance2)", "= int(round((row.dep-dep_list[0])/(dep_list[1]-dep_list[0]), 0)) value_mesh[i, j, k] = row[parameter] return x_mesh,", "hnpts, vnpts): lon_list = [lon1, lon2] lat_list = [lat1, lat2]", "k], dep_mesh[i, j, k]) for index, row in data_pd.iterrows(): i", "j in range(dy): for k in range(dz): x_mesh[i, j, k],", "value_mesh[i, j, k] = row[parameter] return x_mesh, y_mesh, z_mesh, value_mesh", "required=True, type=str, help=\"the pickle file\") @click.option('--parameter', required=True, type=str, help=\"physicial parameter", "lat_list = [lat1, lat2] dep_list = [dep1, dep2] data_pd_raw =", "lld2xyzr(lat, lon, dep) distance2 = (x_mesh-x)**2+(y_mesh-y)**2+(z_mesh-z)**2 mindistance2 = np.min(distance2) coors", "parameter) lons_plot, lats_plot, deps_plot = generate_vertical_profile_grids( lon_list, lat_list, dep_list, hnpts,", "vmax_round else: vmax = vmax_round+0.01 print(vmin, vmax, np.max(values), np.min(values), vmin_round,", "dep): R_EARTH_KM = 6371.0 r = (R_EARTH_KM-dep)/R_EARTH_KM theta = 90-lat", "R_EARTH_KM = 
6371.0 r = (R_EARTH_KM-dep)/R_EARTH_KM theta = 90-lat phi", "lon2] lat_list = [lat1, lat2] dep_list = [dep1, dep2] data_pd_raw", "data_pd_raw = pd.read_pickle(data) # data_pd is too big minlon =", "hnpts, vnpts): lons = np.linspace(lon_list[0], lon_list[1], hnpts) lats = np.linspace(lat_list[0],", "else: vmin = vmin_round-0.01 vmax_round = round(np.max(values), 2) if(vmax_round >", "< np.min(values)): vmin = vmin_round else: vmin = vmin_round-0.01 vmax_round", "indexing=\"ij\") dx, dy, dz = np.shape(lon_mesh) value_mesh = np.zeros_like(lon_mesh) x_mesh", "min(lat1, lat2) maxlat = max(lat1, lat2) mindep = min(dep1, dep2)", "dep_list, hnpts, vnpts) values = np.zeros((hnpts, vnpts)) for ih in", "minlat) & (data_pd_raw.lon < maxlon) & (data_pd_raw.lon > minlon) &", "j, k], y_mesh[i, j, k], z_mesh[i, j, k], r_mesh[i, j,", "and (lon: {lon2}°, lat: {lat2}°)\") plt.ylabel(\"depth(km)\") plt.show() if __name__ ==", "vmax = vmax_round+0.01 print(vmin, vmax, np.max(values), np.min(values), vmin_round, vmax_round) plt.contourf(mesh_plot_lat,", "= (x_mesh-x)**2+(y_mesh-y)**2+(z_mesh-z)**2 mindistance2 = np.min(distance2) coors = np.where(distance2 == mindistance2)", "= np.linspace(lat_list[0], lat_list[1], hnpts) deps = np.linspace(dep_list[0], dep_list[1], vnpts) return", "dep_set = set(data_pd[\"dep\"]) lon_list = sorted(lon_set) lat_list = sorted(lat_set) dep_list", "z_mesh, value_mesh) # print(lats_plot[ih], lons_plot[ih], deps_plot[iv], values[ih, iv]) # plotting", "for i in range(dx): for j in range(dy): for k", "print(vmin, vmax, np.max(values), np.min(values), vmin_round, vmax_round) plt.contourf(mesh_plot_lat, mesh_plot_dep, values, 101,", "& (data_pd.lon == lon) & (data_pd.dep == dep)][parameter].values[0] @numba.njit() def", "value = value_mesh[coors[0][0], coors[1][0], coors[2][0]] return value def generate_vertical_profile_grids(lon_list, lat_list,", "[dep1, dep2] data_pd_raw = pd.read_pickle(data) # data_pd is too big", "= np.min(distance2) coors = np.where(distance2 == mindistance2) value = value_mesh[coors[0][0],", "data_pd_raw.lat >= minlat) & (data_pd_raw.lon < maxlon) & (data_pd_raw.lon >", "= np.arange(vmin, vmax, 0.01) plt.colorbar(ticks=v, label=\"perturbation\") plt.gca().invert_yaxis() plt.xlabel( f\"latitude(°) between", "(data_pd.lon == lon) & (data_pd.dep == dep)][parameter].values[0] @numba.njit() def lld2xyzr(lat,", "(R_EARTH_KM-dep)/R_EARTH_KM theta = 90-lat phi = lon z = r*cosd(theta)", "{lon1}°, lat: {lat1}°) and (lon: {lon2}°, lat: {lat2}°)\") plt.ylabel(\"depth(km)\") plt.show()", "(data_pd_raw.lon > minlon) & (data_pd_raw.dep >= mindep) & (data_pd_raw.dep <=", "vmin and vmax vmin_round = round(np.min(values), 2) if(vmin_round < np.min(values)):", "int(round((row.lat-lat_list[0])/(lat_list[1]-lat_list[0]), 0)) k = int(round((row.dep-dep_list[0])/(dep_list[1]-dep_list[0]), 0)) value_mesh[i, j, k] =", "value_mesh): x, y, z, _ = lld2xyzr(lat, lon, dep) distance2", "(data_pd_raw.lon < maxlon) & (data_pd_raw.lon > minlon) & (data_pd_raw.dep >=", "y_mesh, z_mesh, value_mesh): x, y, z, _ = lld2xyzr(lat, lon,", "= interp_value( lats_plot[ih], lons_plot[ih], deps_plot[iv], x_mesh, y_mesh, z_mesh, value_mesh) #", "& (data_pd_raw.dep >= mindep) & (data_pd_raw.dep <= maxdep)] x_mesh, y_mesh,", "lon2) minlat = min(lat1, lat2) maxlat = max(lat1, lat2) mindep", "pd import click import numba def prepare_data(data_pd, parameter): lon_set =", "vnpts) values = np.zeros((hnpts, vnpts)) for ih in range(hnpts): for", "= sorted(lat_set) dep_list = sorted(dep_set) lon_mesh, lat_mesh, dep_mesh 
= np.meshgrid(" ]
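One way to exercise the command without a shell is click's built-in test runner; the pickle name, coordinates, and parameter below are placeholders for illustration, not values from the original script:

from click.testing import CliRunner

runner = CliRunner()
# hypothetical invocation: "model.pkl" and "vs" are assumed example inputs
result = runner.invoke(main, [
    "--lon1", "100", "--lat1", "25", "--dep1", "0",
    "--lon2", "110", "--lat2", "35", "--dep2", "700",
    "--data", "model.pkl", "--parameter", "vs",
    "--hnpts", "100", "--vnpts", "50",
])
print(result.exit_code, result.output)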
[ "test_herokupostgres(monkeypatch): monkeypatch.setenv('HEROKU_POSTGRESQL_ORANGE_URL', 'heroku-db-uri') app = create_sample_app() assert app.config['SQLALCHEMY_DATABASE_URI'] == 'heroku-db-uri'", "Flask from flask_appconfig import HerokuConfig def create_sample_app(): app = Flask('testapp')", "create_sample_app(): app = Flask('testapp') HerokuConfig(app) return app def test_herokupostgres(monkeypatch): monkeypatch.setenv('HEROKU_POSTGRESQL_ORANGE_URL',", "from flask_appconfig import HerokuConfig def create_sample_app(): app = Flask('testapp') HerokuConfig(app)", "app def test_herokupostgres(monkeypatch): monkeypatch.setenv('HEROKU_POSTGRESQL_ORANGE_URL', 'heroku-db-uri') app = create_sample_app() assert app.config['SQLALCHEMY_DATABASE_URI']", "= Flask('testapp') HerokuConfig(app) return app def test_herokupostgres(monkeypatch): monkeypatch.setenv('HEROKU_POSTGRESQL_ORANGE_URL', 'heroku-db-uri') app", "from flask import Flask from flask_appconfig import HerokuConfig def create_sample_app():", "app = Flask('testapp') HerokuConfig(app) return app def test_herokupostgres(monkeypatch): monkeypatch.setenv('HEROKU_POSTGRESQL_ORANGE_URL', 'heroku-db-uri')", "return app def test_herokupostgres(monkeypatch): monkeypatch.setenv('HEROKU_POSTGRESQL_ORANGE_URL', 'heroku-db-uri') app = create_sample_app() assert", "import Flask from flask_appconfig import HerokuConfig def create_sample_app(): app =", "def test_herokupostgres(monkeypatch): monkeypatch.setenv('HEROKU_POSTGRESQL_ORANGE_URL', 'heroku-db-uri') app = create_sample_app() assert app.config['SQLALCHEMY_DATABASE_URI'] ==", "HerokuConfig(app) return app def test_herokupostgres(monkeypatch): monkeypatch.setenv('HEROKU_POSTGRESQL_ORANGE_URL', 'heroku-db-uri') app = create_sample_app()", "def create_sample_app(): app = Flask('testapp') HerokuConfig(app) return app def test_herokupostgres(monkeypatch):", "HerokuConfig def create_sample_app(): app = Flask('testapp') HerokuConfig(app) return app def", "flask import Flask from flask_appconfig import HerokuConfig def create_sample_app(): app", "import HerokuConfig def create_sample_app(): app = Flask('testapp') HerokuConfig(app) return app", "flask_appconfig import HerokuConfig def create_sample_app(): app = Flask('testapp') HerokuConfig(app) return", "Flask('testapp') HerokuConfig(app) return app def test_herokupostgres(monkeypatch): monkeypatch.setenv('HEROKU_POSTGRESQL_ORANGE_URL', 'heroku-db-uri') app =" ]
[ "stream_log = logging.StreamHandler() stream_log.setFormatter(formatter) logger.addHandler(stream_log) # if disabled # logger.disabled", "Set Flask logger \"\"\" logger = logging.getLogger('FLASK_LOG') logger.setLevel(logging.DEBUG) stream_log =", "import logging \"\"\" Formatter \"\"\" formatter = logging.Formatter('%(asctime)s - %(name)s", "= logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s', datefmt='%Y-%m-%d:%H:%M:%S') \"\"\"", "- %(message)s', datefmt='%Y-%m-%d:%H:%M:%S') \"\"\" Set Flask logger \"\"\" logger =", "logging.StreamHandler() stream_log.setFormatter(formatter) logger.addHandler(stream_log) # if disabled # logger.disabled = True", "Formatter \"\"\" formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s -", "\"\"\" formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s',", "= logging.getLogger('FLASK_LOG') logger.setLevel(logging.DEBUG) stream_log = logging.StreamHandler() stream_log.setFormatter(formatter) logger.addHandler(stream_log) # if", "logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s', datefmt='%Y-%m-%d:%H:%M:%S') \"\"\" Set", "formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s', datefmt='%Y-%m-%d:%H:%M:%S')", "%(message)s', datefmt='%Y-%m-%d:%H:%M:%S') \"\"\" Set Flask logger \"\"\" logger = logging.getLogger('FLASK_LOG')", "\"\"\" Set Flask logger \"\"\" logger = logging.getLogger('FLASK_LOG') logger.setLevel(logging.DEBUG) stream_log", "%(levelname)s - %(message)s', datefmt='%Y-%m-%d:%H:%M:%S') \"\"\" Set Flask logger \"\"\" logger", "= logging.StreamHandler() stream_log.setFormatter(formatter) logger.addHandler(stream_log) # if disabled # logger.disabled =", "logger.setLevel(logging.DEBUG) stream_log = logging.StreamHandler() stream_log.setFormatter(formatter) logger.addHandler(stream_log) # if disabled #", "logging \"\"\" Formatter \"\"\" formatter = logging.Formatter('%(asctime)s - %(name)s -", "- %(name)s - %(levelname)s - %(message)s', datefmt='%Y-%m-%d:%H:%M:%S') \"\"\" Set Flask", "logger = logging.getLogger('FLASK_LOG') logger.setLevel(logging.DEBUG) stream_log = logging.StreamHandler() stream_log.setFormatter(formatter) logger.addHandler(stream_log) #", "logging.getLogger('FLASK_LOG') logger.setLevel(logging.DEBUG) stream_log = logging.StreamHandler() stream_log.setFormatter(formatter) logger.addHandler(stream_log) # if disabled", "\"\"\" logger = logging.getLogger('FLASK_LOG') logger.setLevel(logging.DEBUG) stream_log = logging.StreamHandler() stream_log.setFormatter(formatter) logger.addHandler(stream_log)", "Flask logger \"\"\" logger = logging.getLogger('FLASK_LOG') logger.setLevel(logging.DEBUG) stream_log = logging.StreamHandler()", "%(name)s - %(levelname)s - %(message)s', datefmt='%Y-%m-%d:%H:%M:%S') \"\"\" Set Flask logger", "- %(levelname)s - %(message)s', datefmt='%Y-%m-%d:%H:%M:%S') \"\"\" Set Flask logger \"\"\"", "logger \"\"\" logger = logging.getLogger('FLASK_LOG') logger.setLevel(logging.DEBUG) stream_log = logging.StreamHandler() stream_log.setFormatter(formatter)", "\"\"\" Formatter \"\"\" formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s", "datefmt='%Y-%m-%d:%H:%M:%S') \"\"\" Set Flask logger \"\"\" logger = logging.getLogger('FLASK_LOG') logger.setLevel(logging.DEBUG)" ]
[ "s3 = boto3.client('s3', aws_access_key_id=settings.S3_ACCESS_KEY, aws_secret_access_key=settings.S3_SECRET) hostname = socket.gethostname().replace('-','_') s3_object_name =", "os.path.join(BACKUP_DIR, path) print('Uploading %s to %s on S3 bucket %s'", "/ %s (%.2f%%)\" % ( self._filename, self._seen_so_far, self._size, percentage)) sys.stdout.flush()", "100 sys.stdout.write( \"\\r%s %s / %s (%.2f%%)\" % ( self._filename,", "s3_object_name = f'backup_{hostname}/backup_{hostname}_{time.strftime(\"%Y-%m-%d-%H-%M\")}.sql' path = os.listdir(BACKUP_DIR)[0] full_path = os.path.join(BACKUP_DIR, path)", "( self._filename, self._seen_so_far, self._size, percentage)) sys.stdout.flush() import time import boto3", "= ''.join([CURRENT_DIR, '/../../']) sys.path.insert(0, NEWSBLUR_DIR) os.environ['DJANGO_SETTINGS_MODULE'] = 'newsblur_web.settings' import threading", "bytes_amount): # To simplify, assume this is hooked up to", "ProgressPercentage(object): def __init__(self, filename): self._filename = filename self._size = float(os.path.getsize(filename))", "%s' % (full_path, s3_object_name, settings.S3_BACKUP_BUCKET)) s3.upload_file(full_path, settings.S3_BACKUP_BUCKET, s3_object_name, Callback=ProgressPercentage(full_path)) os.remove(full_path)", "path = os.listdir(BACKUP_DIR)[0] full_path = os.path.join(BACKUP_DIR, path) print('Uploading %s to", "threading class ProgressPercentage(object): def __init__(self, filename): self._filename = filename self._size", "# To simplify, assume this is hooked up to a", "import threading class ProgressPercentage(object): def __init__(self, filename): self._filename = filename", "path) print('Uploading %s to %s on S3 bucket %s' %", "class ProgressPercentage(object): def __init__(self, filename): self._filename = filename self._size =", "= 'newsblur_web.settings' import threading class ProgressPercentage(object): def __init__(self, filename): self._filename", "with self._lock: self._seen_so_far += bytes_amount percentage = (self._seen_so_far / self._size)", "print('Uploading %s to %s on S3 bucket %s' % (full_path,", "NEWSBLUR_DIR = ''.join([CURRENT_DIR, '/../../']) sys.path.insert(0, NEWSBLUR_DIR) os.environ['DJANGO_SETTINGS_MODULE'] = 'newsblur_web.settings' import", "import boto3 from django.conf import settings BACKUP_DIR = '/srv/newsblur/backup/' s3", "self._seen_so_far += bytes_amount percentage = (self._seen_so_far / self._size) * 100", "import time import boto3 from django.conf import settings BACKUP_DIR =", "django.conf import settings BACKUP_DIR = '/srv/newsblur/backup/' s3 = boto3.client('s3', aws_access_key_id=settings.S3_ACCESS_KEY,", "to a single filename with self._lock: self._seen_so_far += bytes_amount percentage", "boto3.client('s3', aws_access_key_id=settings.S3_ACCESS_KEY, aws_secret_access_key=settings.S3_SECRET) hostname = socket.gethostname().replace('-','_') s3_object_name = f'backup_{hostname}/backup_{hostname}_{time.strftime(\"%Y-%m-%d-%H-%M\")}.sql' path", "= f'backup_{hostname}/backup_{hostname}_{time.strftime(\"%Y-%m-%d-%H-%M\")}.sql' path = os.listdir(BACKUP_DIR)[0] full_path = os.path.join(BACKUP_DIR, path) print('Uploading", "this is hooked up to a single filename with self._lock:", "__init__(self, filename): self._filename = filename self._size = float(os.path.getsize(filename)) self._seen_so_far =", "'newsblur_web.settings' import threading class ProgressPercentage(object): def __init__(self, filename): self._filename =", "sys.stdout.write( \"\\r%s %s / %s (%.2f%%)\" % ( self._filename, self._seen_so_far,", "filename self._size = 
#!/usr/bin/python3
import os
import sys
import socket
import threading
import time

# Make the NewsBlur Django settings importable before pulling in django.conf.
CURRENT_DIR = os.path.dirname(__file__)
NEWSBLUR_DIR = ''.join([CURRENT_DIR, '/../../'])
sys.path.insert(0, NEWSBLUR_DIR)
os.environ['DJANGO_SETTINGS_MODULE'] = 'newsblur_web.settings'

import boto3
from django.conf import settings


class ProgressPercentage(object):
    """Transfer callback that prints upload progress to stdout."""

    def __init__(self, filename):
        self._filename = filename
        self._size = float(os.path.getsize(filename))
        self._seen_so_far = 0
        self._lock = threading.Lock()

    def __call__(self, bytes_amount):
        # To simplify, assume this is hooked up to a single filename
        with self._lock:
            self._seen_so_far += bytes_amount
            percentage = (self._seen_so_far / self._size) * 100
            sys.stdout.write(
                "\r%s %s / %s (%.2f%%)" % (
                    self._filename, self._seen_so_far, self._size,
                    percentage))
            sys.stdout.flush()


BACKUP_DIR = '/srv/newsblur/backup/'

s3 = boto3.client('s3', aws_access_key_id=settings.S3_ACCESS_KEY,
                  aws_secret_access_key=settings.S3_SECRET)
hostname = socket.gethostname().replace('-', '_')
s3_object_name = f'backup_{hostname}/backup_{hostname}_{time.strftime("%Y-%m-%d-%H-%M")}.sql'

# Upload the single backup file found in BACKUP_DIR, reporting progress.
path = os.listdir(BACKUP_DIR)[0]
full_path = os.path.join(BACKUP_DIR, path)
print('Uploading %s to %s on S3 bucket %s' % (
    full_path, s3_object_name, settings.S3_BACKUP_BUCKET))
s3.upload_file(full_path, settings.S3_BACKUP_BUCKET, s3_object_name,
               Callback=ProgressPercentage(full_path))
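# How the Callback hook behaves: boto3 calls the object passed as Callback=
# repeatedly during the transfer, each time with the number of bytes moved in
# the latest chunk. A minimal sketch of that contract, exercising the class
# locally without S3 (the calls below are illustrative only):
#
#     tracker = ProgressPercentage(full_path)
#     tracker(1024)   # as if boto3 had just transferred a 1 KiB chunk
#     tracker(4096)   # running total and percentage advance under the lock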
<reponame>Orange-OpenSource/xtesting-onap-tests<filename>onap_tests/scenario/solution.py
#!/usr/bin/python
#
# This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# pylint: disable=missing-docstring
# pylint: disable=duplicate-code
import logging
import time

import onap_tests.components.aai as aai
import onap_tests.components.so as so
import onap_tests.components.sdnc as sdnc
import onap_tests.components.nbi as nbi
import onap_tests.utils.stack_checker as sc
import onap_tests.utils.utils as onap_utils

PROXY = onap_utils.get_config("general.proxy")


class Solution(object):
    """
    VNF: Class to automate the instantiation of a VNF

    It is assumed that the Design phase has already been done.
    The yaml template is available and stored in the template directory.
    TODO: automate the design phase
    """

    __logger = logging.getLogger(__name__)

    def __init__(self, **kwargs):
        """Initialize Solution object."""
        super(Solution, self).__init__()
        self.vnf_config = {}
        self.components = {}
        if "case" not in kwargs:
            # by convention, if the VNF case is not specified we set mrf
            kwargs["case"] = "mrf"
        self.vnf_config["vnf"] = kwargs["case"]
        if "nbi" in kwargs:
            self.vnf_config["nbi"] = kwargs["nbi"]
        # to destroy resources, the sdnc module name shall be given
        if "sdnc_vnf_name" in kwargs:
            self.vnf_config["sdnc_vnf_name"] = kwargs["sdnc_vnf_name"]
            # random part = the last 6 chars of the vnf name
            self.vnf_config["random_string"] = kwargs["sdnc_vnf_name"][-6:]
        else:
            self.vnf_config["random_string"] = (
                onap_utils.random_string_generator())
            self.vnf_config["sdnc_vnf_name"] = (
                onap_utils.get_config("onap.service.name") + "_" +
                kwargs["case"] + "_" + self.vnf_config["random_string"])
        vnf_list = list(onap_utils.get_template_param(
            self.vnf_config["vnf"], "topology_template.node_templates"))
        vf_module_list = list(onap_utils.get_template_param(
            self.vnf_config["vnf"], "topology_template.groups"))
        # Class attributes for instance, vnf and module VF
        self.service_infos = {}
        self.vnf_infos = {'list': vnf_list}
        self.module_infos = {'list': vf_module_list}
        # retrieve infos from the configuration files
        self.set_service_instance_var()
        self.set_vnf_var()
        self.set_module_var()
        self.set_onap_components()

    def set_service_instance_var(self):
        """
        Set service instance variables from the config file
        """
        self.vnf_config["vnf_name"] = onap_utils.get_template_param(
            self.vnf_config["vnf"], "metadata.name")
        self.vnf_config["invariant_uuid"] = onap_utils.get_template_param(
            self.vnf_config["vnf"], "metadata.invariantUUID")
        self.vnf_config["uuid"] = onap_utils.get_template_param(
            self.vnf_config["vnf"], "metadata.UUID")

    def set_vnf_var(self):
        """
        Set vnf variables from the config file
        """
        for i, elt in enumerate(self.vnf_infos['list']):
            vnf_config = {}
            self.__logger.info("get VNF %s info", elt)
            vnf_config["vnf_customization_name"] = elt
            vnf_config["vnf_model_name"] = onap_utils.get_template_param(
                self.vnf_config["vnf"],
                "topology_template.node_templates." +
                vnf_config["vnf_customization_name"] + ".metadata.name")
            vnf_config["vnf_invariant_id"] = onap_utils.get_template_param(
                self.vnf_config["vnf"],
                "topology_template.node_templates." +
                vnf_config["vnf_customization_name"] +
                ".metadata.invariantUUID")
            vnf_config["vnf_version_id"] = onap_utils.get_template_param(
                self.vnf_config["vnf"],
                "topology_template.node_templates." +
                vnf_config["vnf_customization_name"] + ".metadata.UUID")
            vnf_config["vnf_customization_id"] = (
                onap_utils.get_template_param(
                    self.vnf_config["vnf"],
                    "topology_template.node_templates." +
                    vnf_config["vnf_customization_name"] +
                    ".metadata.customizationUUID"))
            vnf_config["vnf_type"] = list(onap_utils.get_template_param(
                self.vnf_config["vnf"], "topology_template.groups"))[i]
            vnf_config["vnf_generic_name"] = (
                self.vnf_config["vnf_name"] + "-service-instance-" +
                self.vnf_config["random_string"])
            vnf_config["vnf_generic_type"] = (
                self.vnf_config["vnf_name"] + "/" +
                vnf_config["vnf_customization_name"])
            self.vnf_config[elt] = vnf_config

    def set_module_var(self):
        """
        Set module variables from the config file
        """
        for elt in self.vnf_infos['list']:
            vf_config = {}
            # we cannot be sure that the modules are in the same order
            # as the vnfs
            vf_index = onap_utils.get_vf_module_index(
                self.module_infos['list'], elt)
            vnf_type = list(onap_utils.get_template_param(
                self.vnf_config["vnf"], "topology_template.groups"))[vf_index]
            self.__logger.info("Complete Module info for VNF %s", elt)
            vf_config["sdnc_vnf_type"] = onap_utils.get_template_param(
                self.vnf_config["vnf"],
                "topology_template.groups." + vnf_type +
                ".metadata.vfModuleModelName")
            vnf_param = (self.vnf_config["vnf"] + "." + str(elt) +
                         ".vnf_parameters")
            vf_config["vnf_parameters"] = onap_utils.get_config(vnf_param)
            vf_config["module_invariant_id"] = onap_utils.get_template_param(
                self.vnf_config["vnf"],
                "topology_template.groups." + vnf_type +
                ".metadata.vfModuleModelInvariantUUID")
            vf_config["module_name_version_id"] = (
                onap_utils.get_template_param(
                    self.vnf_config["vnf"],
                    "topology_template.groups." + vnf_type +
                    ".metadata.vfModuleModelUUID"))
            vf_config["module_customization_id"] = (
                onap_utils.get_template_param(
                    self.vnf_config["vnf"],
                    "topology_template.groups." + vnf_type +
                    ".metadata.vfModuleModelCustomizationUUID"))
            vf_config["module_version_id"] = onap_utils.get_template_param(
                self.vnf_config["vnf"],
                "topology_template.groups." + vnf_type +
                ".metadata.vfModuleModelUUID")
            self.vnf_config[elt].update(vf_config)

    def set_onap_components(self):
        """
        Set ONAP component objects
        """
        self.components["aai"] = aai.Aai(PROXY, self.__logger)
        self.components["so"] = so.So(PROXY, self.__logger)
        self.components["sdnc"] = sdnc.Sdnc(PROXY, self.__logger)
        self.components["nbi"] = nbi.Nbi(PROXY, self.__logger)

    def instantiate(self):
        """
        Instantiate a VNF with ONAP

        * Create the service instance (SO)
        * Create the VNF instance (SO)
        * Preload the VNF in the SDNC
        * Create the VF module instance (SO)
        """
        instance_info = {"instance_id": ""}
        vnf_info = {"vnf_id": ""}
        module_info = {}
        module_ref = {"instanceId": ""}
        module_ok = False
        check_vnf = False
        self.__logger.info("Start the instantiation of the VNF")
        instance_info = self.create_service_instance()
        service_ok = self.components["aai"].check_service_instance(
            self.vnf_config["vnf_name"], instance_info["instance_id"])
        if service_ok:
            # create VNF instance(s)
            for elt in self.vnf_infos['list']:
                vnf_info = self.create_vnf_instance(elt)
                vnf_ok = True
                self.__logger.info("Check vnf %s ....", elt)
                if not self.components["aai"].check_vnf_instance(
                        vnf_info["vnf_id"]):
                    vnf_ok = False
                    break
                else:
                    # preload VNF(s) in SDNC
                    self.preload(elt)
                    time.sleep(10)
            if vnf_ok:
                # create VF module(s)
                for elt in self.vnf_infos['list']:
                    module_info = self.create_module_instance(elt)
                    module_ok = True
                    module_ref = module_info['module_instance']
                    if not self.components["aai"].check_module_instance(
                            vnf_info["vnf_id"],
                            module_ref["requestReferences"]["instanceId"]):
                        module_ok = False
                        break
                    else:
                        # check VNF using OpenStack directly
                        check_vnf = self.check_vnf(
                            self.module_infos[elt]["module_instance_name"])
                        if check_vnf:
                            self.__logger.info("Stack successfully checked")
        return {"status": module_ok, "instance_id": instance_info,
                "vnf_info": vnf_info, "module_info": module_info,
                "check_heat": check_vnf}

    def clean(self):
        """
        Clean VNF from ONAP

        Args:
        * instance_id: The ID of the VNF service instance
        * vnf_id: The ID of the VNF instance
        * module_id: The ID of the VF module instance
        """
        instance_id = self.service_infos['instance_id']
        for elt in self.vnf_infos['list']:
            vnf_id = self.vnf_infos[elt]["vnf_id"]
            module_id = (self.module_infos[elt]["module_instance"]
                         ["requestReferences"]["instanceId"])
            self.clean_module(elt)
            if not self.components["aai"].check_module_cleaned(
                    vnf_id, module_id):
                return False
            else:
                self.clean_vnf(elt)
                if not self.components["aai"].check_vnf_cleaned(vnf_id):
                    return False
                else:
                    self.clean_instance(instance_id)
                    if self.components["aai"].check_service_instance_cleaned(
                            self.vnf_config["vnf_name"], instance_id):
                        self.__logger.debug("Instance still in AAI DB")
                    else:
                        return False
            time.sleep(10)
            self.clean_preload(elt)
        return True

    def create_service_instance(self):
        """
        Create service instance

        2 options to create the instance:
        * with SO
        * with NBI
        """
        instance_id = None
        model_info = self.components["so"].get_service_model_info(
            self.vnf_config['invariant_uuid'], self.vnf_config['uuid'])
        # .get() avoids a KeyError when the nbi kwarg was not provided
        if self.vnf_config.get("nbi"):
            self.__logger.info("1) Create Service instance from NBI")
            self.__logger.info("***********************************")
            request_info = self.components["nbi"].get_request_info()
            service_payload = (
                self.components["nbi"].get_nbi_service_order_payload())
            nbi_info = self.components["nbi"].create_service_order_nbi(
                service_payload)
            time.sleep(5)
            instance_id = (
                self.components["nbi"].get_service_instance_id_from_order(
                    nbi_info["id"]))
        else:
            self.__logger.info("1) Create Service instance in SO")
            self.__logger.info("********************************")
            request_info = self.components["so"].get_request_info(
                self.vnf_config["vnf"] + "-service-instance-" +
                self.vnf_config['random_string'])
            service_payload = self.components["so"].get_service_payload(
                self.vnf_config["vnf"], request_info, model_info)
            instance_id = self.components["so"].create_instance(
                service_payload)
        service_instance_info = {"instance_id": instance_id,
                                 "request_info": request_info,
                                 "service_payload": service_payload}
        self.__logger.info("Service instance created: %s",
                           service_instance_info)
        self.service_infos = service_instance_info
        return service_instance_info

    def create_vnf_instance(self, elt):
        """
        Create VNF instance

        Args:
        * elt: the VNF
        """
        vnf_id = None
        self.__logger.info("2) Create VNF instance in SO")
        self.__logger.info("****************************")
        model_info = self.components["so"].get_vnf_model_info(
            self.vnf_config[elt]['vnf_invariant_id'],
            self.vnf_config[elt]['vnf_version_id'],
            self.vnf_config[elt]['vnf_model_name'],
            self.vnf_config[elt]['vnf_customization_id'],
            self.vnf_config[elt]['vnf_customization_name'])
        vnf_related_instance = self.components["so"].get_vnf_related_instance(
            self.service_infos["instance_id"],
            self.vnf_config['invariant_uuid'],
            self.vnf_config['uuid'])
        vnf_instance_name = (self.vnf_config["vnf"] + "-vnf-instance-" +
                             str(elt).replace(" ", "_") + "_" +
                             self.vnf_config['random_string'])
        request_info = self.components["so"].get_request_info(
            vnf_instance_name)
        vnf_payload = self.components["so"].get_vnf_payload(
            self.vnf_config["vnf"], request_info, model_info,
            vnf_related_instance)
        # self.__logger.debug("VNF payload: %s", vnf_payload)
        vnf_id = self.components["so"].create_vnf(
            self.service_infos["instance_id"], vnf_payload)
        vnf_info = {"vnf_id": vnf_id,
                    "vnf_instance_name": vnf_instance_name,
                    "vnf_payload": vnf_payload,
                    "vnf_related_instance": vnf_related_instance}
        self.__logger.info(">>>> SO vnf instance created %s", vnf_info)
        self.vnf_infos[elt] = vnf_info
        return vnf_info

    def preload(self, elt):
        """
        Preload VNF in SDNC

        Args:
        * elt: the VNF
        """
        vnf_preload_infos = {}
        self.__logger.info("3) Preload VNF %s in SDNC", elt)
        self.__logger.info("*******************************")
        vnf_name = (self.vnf_config["vnf"] + "-vfmodule-instance-" +
                    str(elt).replace(" ", "_") + "_" +
                    self.vnf_config['random_string'])
        vnf_topology_identifier = {
            "generic-vnf-name": vnf_name,
            "generic-vnf-type": (
                self.vnf_config[elt]['vnf_generic_type']),
            "service-type": self.service_infos["instance_id"],
            "vnf-name": vnf_name,
            "vnf-type": self.vnf_config[elt]['sdnc_vnf_type']}
        sdnc_payload = self.components["sdnc"].get_preload_payload(
            self.vnf_config[elt]['vnf_parameters'],
            vnf_topology_identifier)
        self.__logger.info("SDNC preload payload %s", sdnc_payload)
        sdnc_preload = self.components["sdnc"].preload(sdnc_payload)
        self.__logger.debug("SDNC preload answer: %s", sdnc_preload)
        vnf_preload_infos[elt] = ({"sdnc_payload": sdnc_payload,
                                   "sdnc_preload": sdnc_preload})
        return vnf_preload_infos[elt]

    def create_module_instance(self, elt):
        """
        Create module instance

        Args:
        * instance_info: dict including the instance_id, the request_info
          and the service payload
        * vnf_info: dict including the vnf_id, vnf_related_instance and
          the vnf payload
        """
        module_info = {}
        self.__logger.info("4) Create MODULE %s instance in SO", elt)
        self.__logger.info("***************************************")
        module_model_info = self.components["so"].get_module_model_info(
            self.vnf_config[elt]['module_invariant_id'],
            self.vnf_config[elt]['module_name_version_id'],
            self.vnf_config[elt]['sdnc_vnf_type'],
            self.vnf_config[elt]['module_customization_id'],
            self.vnf_config[elt]['module_version_id'])
        module_related_instance = (
            self.components["so"].get_module_related_instance(
                self.vnf_infos[elt]["vnf_id"],
                self.vnf_config[elt]['vnf_invariant_id'],
                self.vnf_config[elt]['vnf_version_id'],
                self.vnf_config[elt]['vnf_model_name'],
                self.vnf_config[elt]['vnf_customization_id'],
                self.vnf_config[elt]['vnf_customization_name']))
        module_instance_name = (self.vnf_config["vnf"] +
                                "-vfmodule-instance-" +
                                str(elt).replace(" ", "_") + "_" +
                                self.vnf_config['random_string'])
        request_info = self.components["so"].get_request_info(
            module_instance_name)
        module_payload = self.components["so"].get_module_payload(
            self.vnf_config["vnf"], request_info, module_model_info,
            self.vnf_infos[elt]["vnf_related_instance"],
            module_related_instance)
        self.__logger.debug("Module payload %s", module_payload)
        module_instance = self.components["so"].create_module(
            self.service_infos["instance_id"],
            self.vnf_infos[elt]["vnf_id"], module_payload)
        self.__logger.info(">>>> Module instance created: %s",
                           module_instance)
        module_info = (
            {'module_instance': module_instance,
             'module_instance_name': module_instance_name,
             'module_payload': module_payload,
             'module_model_info': module_model_info,
             'module_related_instance': module_related_instance})
        self.__logger.info("SO module vf(s) created: %s", module_info)
        self.module_infos[elt] = module_info
        return module_info

    def check_vnf(self, stack_name):
        """
        Check that the VNF stack has been properly started
        """
        check_vnf = False
        try:
            my_stack_checker = sc.StackChecker()
            if my_stack_checker.check_stack_is_complete(stack_name):
                check_vnf = True
        except Exception:  # pylint: disable=broad-except
            self.__logger.error(
                "Impossible to find the stack %s in OpenStack", stack_name)
        return check_vnf

    def clean_instance(self, instance_id):
        """
        Clean VNF instance

        Args:
        * instance_id: The service instance of the VNF
        """
        self.__logger.info(" Clean Service Instance ")
        service_payload = self.components["so"].get_service_payload(
            self.vnf_config["vnf"],
            self.components["so"].get_request_info(
                self.vnf_config['sdnc_vnf_name']),
            self.components["so"].get_service_model_info(
                self.vnf_config['invariant_uuid'], self.vnf_config['uuid']))
        self.components["so"].delete_instance(instance_id, service_payload)

    def clean_vnf(self, elt):
        """
        Clean VNF

        Args:
        * instance_id: The service instance of the VNF
        * vnf_id: The VNF id of the VNF
        """
        self.__logger.info(" Clean vnf Instance %s ", elt)
        self.components["so"].delete_vnf(
            self.service_infos["instance_id"],
            self.vnf_infos[elt]["vnf_id"],
            self.vnf_infos[elt]["vnf_payload"])

    def clean_module(self, elt):
        """
        Clean VNF Module

        Args:
        * instance_id: The service instance id of the VNF
        * vnf_id: The VNF id of the VNF
        * module_id: the VF module id of the VNF
        """
        self.__logger.info(" Clean Module VF Instance %s ", elt)
        instance_id = self.service_infos["instance_id"]
        vnf_id = self.vnf_infos[elt]["vnf_id"]
        module_id = (self.module_infos[elt]["module_instance"]
                     ["requestReferences"]["instanceId"])
        module_payload = self.module_infos[elt]["module_payload"]
        self.components["so"].delete_module(
            module_payload, instance_id, vnf_id, module_id)

    def clean_preload(self, elt):
        """
        Clean VNF SDNC preload
        """
        self.__logger.info(" Clean Preload of %s ", elt)
        # if one of the expected preload cleanups fails we return False
        clean_preload = self.components["sdnc"].delete_preload(
            self.module_infos[elt]["module_instance_name"],
            self.vnf_config[elt]["sdnc_vnf_type"])
        return clean_preload

    def clean_all_preload(self):
        """
        Clean VNF SDNC preload with the preload id
        """
        self.__logger.info(" Clean Preload ")
        for elt in self.vnf_infos['list']:
            clean_preload = self.components["sdnc"].delete_preload(
                self.module_infos[elt]["module_instance_name"],
                self.vnf_config[elt]['sdnc_vnf_type'])
        return clean_preload

    def get_info(self):
        """
        Get VNFs Info
        """
        self.__logger.info("Class to manage VNFs")
        self.__logger.info("VNF config: %s", self.vnf_config)
instantiate(self): \"\"\"", "for elt in self.vnf_infos['list']: vnf_id = self.vnf_infos[elt][\"vnf_id\"] module_id = (self.module_infos[elt][\"module_instance\"]", "\"\"} module_ok = False check_vnf = False self.__logger.info(\"Start the instantiation", "get_info(self): \"\"\" Get VNFs Info \"\"\" self.__logger.info(\"Class to manage VNFs\")", "+ \".metadata.vfModuleModelUUID\")) vf_config[\"module_customization_id\"] = ( onap_utils.get_template_param( self.vnf_config[\"vnf\"], \"topology_template.groups.\" + vnf_type", "* instance_info: dict including the instance_id, the request_info and the", "precised we set mrf kwargs[\"case\"] = \"mrf\" self.vnf_config[\"vnf\"] = kwargs[\"case\"]", "resources, sdnc module name shall be given if \"sdnc_vnf_name\" in", "module_payload) module_instance = self.components[\"so\"].create_module( self.service_infos[\"instance_id\"], self.vnf_infos[elt][\"vnf_id\"], module_payload) self.__logger.info(\">>>> Module instance", "self.__logger.info(\"****************************\") model_info = self.components[\"so\"].get_vnf_model_info( self.vnf_config[elt]['vnf_invariant_id'], self.vnf_config[elt]['vnf_version_id'], self.vnf_config[elt]['vnf_model_name'], self.vnf_config[elt]['vnf_customization_id'], self.vnf_config[elt]['vnf_customization_name']) vnf_related_instance", "module_payload = self.components[\"so\"].get_module_payload( self.vnf_config[\"vnf\"], request_info, module_model_info, self.vnf_infos[elt][\"vnf_related_instance\"], module_related_instance) self.__logger.debug(\"Module payload", "self.service_infos['instance_id'] for elt in self.vnf_infos['list']: vnf_id = self.vnf_infos[elt][\"vnf_id\"] module_id =", "vf_config[\"module_invariant_id\"] = onap_utils.get_template_param( self.vnf_config[\"vnf\"], \"topology_template.groups.\" + vnf_type + \".metadata.vfModuleModelInvariantUUID\") vf_config[\"module_name_version_id\"]", "directly check_vnf = self.check_vnf( self.module_infos[elt][\"module_instance_name\"]) if check_vnf: self.__logger.info(\"Stack successfully checked\")", "as onap_utils PROXY = onap_utils.get_config(\"general.proxy\") class Solution(object): \"\"\" VNF: Class", "self.vnf_infos['list']: vnf_id = self.vnf_infos[elt][\"vnf_id\"] module_id = (self.module_infos[elt][\"module_instance\"] [\"requestReferences\"][\"instanceId\"]) self.clean_module(elt) if", "the VNF * module_id: the VF module id of the", "self.__logger.info(\"*******************************\") vnf_name = (self.vnf_config[\"vnf\"] + \"-vfmodule-instance-\" + str(elt).replace(\" \", \"_\")", "%s in SDNC\", elt) self.__logger.info(\"*******************************\") vnf_name = (self.vnf_config[\"vnf\"] + \"-vfmodule-instance-\"", "not self.components[\"aai\"].check_vnf_cleaned(vnf_id): return False else: self.clean_instance(instance_id) if self.components[\"aai\"].check_service_instance_cleaned( self.vnf_config[\"vnf_name\"], instance_id):", "return True def create_service_instance(self): \"\"\" Create service instance 2 options", "Module VF Instance %s \", elt) instance_id = self.service_infos[\"instance_id\"] vnf_id", "self.vnf_infos['list']: vf_config = {} # we cannot be sure that", "TODO: automate the design phase \"\"\" __logger = logging.getLogger(__name__) def", "self.vnf_config[\"random_string\"] = kwargs[\"sdnc_vnf_name\"][-6:] else: self.vnf_config[\"random_string\"] = ( onap_utils.random_string_generator()) self.vnf_config[\"sdnc_vnf_name\"] =", "{} self.__logger.info(\"3) Preload VNF %s in SDNC\", elt) self.__logger.info(\"*******************************\") 
vnf_name", "self.vnf_config[elt]['vnf_customization_id'], self.vnf_config[elt]['vnf_customization_name'])) module_instance_name = (self.vnf_config[\"vnf\"] + \"-vfmodule-instance-\" + str(elt).replace(\" \",", "{\"instanceId\": \"\"} module_ok = False check_vnf = False self.__logger.info(\"Start the", "vnf_config[\"vnf_customization_id\"] = ( onap_utils.get_template_param( self.vnf_config[\"vnf\"], \"topology_template.node_templates.\" + vnf_config[\"vnf_customization_name\"] + \".metadata.customizationUUID\"))", "def clean_all_preload(self): \"\"\" Clean VNF SDNC preload with the preload", "onap_utils.get_template_param( self.vnf_config[\"vnf\"], \"topology_template.groups.\" + vnf_type + \".metadata.vfModuleModelUUID\")) vf_config[\"module_customization_id\"] = (", "self.module_infos = {'list': vf_module_list} # retrieve infos from the configuration", "ONAP Args: instance_id: The ID of the VNF service instance", "of the the vnf name self.vnf_config[\"random_string\"] = kwargs[\"sdnc_vnf_name\"][-6:] else: self.vnf_config[\"random_string\"]", "available under the terms of the Apache License, Version 2.0", "model_info, vnf_related_instance) # self.__logger.debug(\"VNF payload: %s\", vnf_payload) vnf_id = self.components[\"so\"].create_vnf(", "a VNF with ONAP * Create the service instance (SO)", "Clean Module VF Instance %s \", elt) instance_id = self.service_infos[\"instance_id\"]", "# than the vnf vf_index = onap_utils.get_vf_module_index( self.module_infos['list'], elt) vnf_type", "phase has been already done The yaml template is available", "vf_config[\"sdnc_vnf_type\"] = onap_utils.get_template_param( self.vnf_config[\"vnf\"], \"topology_template.groups.\" + vnf_type + \".metadata.vfModuleModelName\") vnf_param", "= ({\"sdnc_payload\": sdnc_payload, \"sdnc_preload\": sdnc_preload}) return vnf_preload_infos[elt] def create_module_instance(self, elt):", "from the config file \"\"\" for i, elt in enumerate(self.vnf_infos['list']):", "vnf_info return vnf_info def preload(self, elt): \"\"\" Preload VNF in", "under the terms of the Apache License, Version 2.0 #", "for i, elt in enumerate(self.vnf_infos['list']): vnf_config = {} self.__logger.info(\"get VNF", "= self.components[\"so\"].get_module_model_info( self.vnf_config[elt]['module_invariant_id'], self.vnf_config[elt]['module_name_version_id'], self.vnf_config[elt]['sdnc_vnf_type'], self.vnf_config[elt]['module_customization_id'], self.vnf_config[elt]['module_version_id']) module_related_instance = (", "+ \"-service-instance-\" + self.vnf_config['random_string']) service_payload = self.components[\"so\"].get_service_payload( self.vnf_config[\"vnf\"], request_info, model_info)", "(SO) * preload the VNF in the SDNC * Create", "clean(self): \"\"\" Clean VNF from ONAP Args: instance_id: The ID", "self.vnf_config[\"vnf\"], \"topology_template.groups.\" + vnf_type + \".metadata.vfModuleModelUUID\") self.vnf_config[elt].update(vf_config) def set_onap_components(self): \"\"\"", "vnf_config[\"vnf_customization_name\"] + \".metadata.UUID\") vnf_config[\"vnf_customization_id\"] = ( onap_utils.get_template_param( self.vnf_config[\"vnf\"], \"topology_template.node_templates.\" +", "\"\"\" instance_info = {\"instance_id\": \"\"} vnf_info = {\"vnf_id\": \"\"} module_info", "the config file \"\"\" for elt in self.vnf_infos['list']: vf_config =", "= (self.vnf_config[\"vnf\"] + \"-vnf-instance-\" + str(elt).replace(\" \", \"_\") + (\"_\")", "vnf_id, \"vnf_instance_name\": vnf_instance_name, \"vnf_payload\": vnf_payload, \"vnf_related_instance\": 
vnf_related_instance} self.__logger.info(\">>>> SO vnf", "\") for elt in self.vnf_infos['list']: clean_preload = self.components[\"sdnc\"].delete_preload( self.module_infos[elt][\"module_instance_name\"], self.vnf_config[elt]['sdnc_vnf_type'])", "module_related_instance = ( self.components[\"so\"].get_module_related_instance( self.vnf_infos[elt][\"vnf_id\"], self.vnf_config[elt]['vnf_invariant_id'], self.vnf_config[elt]['vnf_version_id'], self.vnf_config[elt]['vnf_model_name'], self.vnf_config[elt]['vnf_customization_id'], self.vnf_config[elt]['vnf_customization_name']))", "elt): \"\"\" Clean VNF Module Args: * instance_id: The service", "for elt in self.vnf_infos['list']: clean_preload = self.components[\"sdnc\"].delete_preload( self.module_infos[elt][\"module_instance_name\"], self.vnf_config[elt]['sdnc_vnf_type']) return", "convention is VNF is not precised we set mrf kwargs[\"case\"]", "we return False clean_preload = self.components[\"sdnc\"].delete_preload( self.module_infos[elt][\"module_instance_name\"], self.vnf_config[elt][\"sdnc_vnf_type\"]) return clean_preload", "(self.module_infos[elt][\"module_instance\"] [\"requestReferences\"][\"instanceId\"]) self.clean_module(elt) if not self.components[\"aai\"].check_module_cleaned(vnf_id, module_id): return False else:", "def __init__(self, **kwargs): \"\"\"Initialize Solution object.\"\"\" super(Solution, self).__init__() self.vnf_config =", "False check_vnf = False self.__logger.info(\"Start the instantiation of the VNF\")", "Args: * instance_info: dict including the instance_id, the request_info and", "\"mrf\" self.vnf_config[\"vnf\"] = kwargs[\"case\"] if \"nbi\" in kwargs: self.vnf_config[\"nbi\"] =", "vnf and module VF self.service_infos = {} self.vnf_infos = {'list':", "onap_tests.components.so as so import onap_tests.components.sdnc as sdnc import onap_tests.components.nbi as", "in self.vnf_infos['list']: vf_config = {} # we cannot be sure", "self.__logger.info(\"Check vnf %s ....\", elt) if not self.components[\"aai\"].check_vnf_instance( vnf_info[\"vnf_id\"]): vnf_ok", "vnf_payload) vnf_info = {\"vnf_id\": vnf_id, \"vnf_instance_name\": vnf_instance_name, \"vnf_payload\": vnf_payload, \"vnf_related_instance\":", "my_stack_checker.check_stack_is_complete(stack_name): check_vnf = True except Exception: # pylint: disable=broad-except self.__logger.error(\"Impossible", "SDNC preload with the preload id \"\"\" self.__logger.info(\" Clean Preload", "module_instance = self.components[\"so\"].create_module( self.service_infos[\"instance_id\"], self.vnf_infos[elt][\"vnf_id\"], module_payload) self.__logger.info(\">>>> Module instance created:", "+ \".metadata.customizationUUID\")) vnf_config[\"vnf_type\"] = list(onap_utils.get_template_param( self.vnf_config[\"vnf\"], \"topology_template.groups\"))[i] vnf_config[\"vnf_generic_name\"] = (", "VNF SDNC preload \"\"\" self.__logger.info(\" Clean Preload of %s \",", "= self.components[\"sdnc\"].delete_preload( self.module_infos[elt][\"module_instance_name\"], self.vnf_config[elt][\"sdnc_vnf_type\"]) return clean_preload def clean_all_preload(self): \"\"\" Clean", "+ self.vnf_config[\"random_string\"]) vnf_list = list(onap_utils.get_template_param( self.vnf_config[\"vnf\"], \"topology_template.node_templates\")) vf_module_list = list(onap_utils.get_template_param(", "my_stack_checker = sc.StackChecker() if my_stack_checker.check_stack_is_complete(stack_name): check_vnf = True except Exception:", "* Create the service instance (SO) * Create the VNF", "than the vnf vf_index = 
onap_utils.get_vf_module_index( self.module_infos['list'], elt) vnf_type =", "= kwargs[\"sdnc_vnf_name\"] # Random part = 6 last char of", "False try: my_stack_checker = sc.StackChecker() if my_stack_checker.check_stack_is_complete(stack_name): check_vnf = True", "vnf_name = (self.vnf_config[\"vnf\"] + \"-vfmodule-instance-\" + str(elt).replace(\" \", \"_\") +", "\"-vfmodule-instance-\" + str(elt).replace(\" \", \"_\") + \"_\" + self.vnf_config['random_string']) vnf_topology_identifier", "self.components[\"sdnc\"] = sdnc.Sdnc(PROXY, self.__logger) self.components[\"nbi\"] = nbi.Nbi(PROXY, self.__logger) def instantiate(self):", "vnf_payload = self.components[\"so\"].get_vnf_payload( self.vnf_config[\"vnf\"], request_info, model_info, vnf_related_instance) # self.__logger.debug(\"VNF payload:", "self.vnf_config[\"vnf\"], \"topology_template.node_templates.\" + vnf_config[\"vnf_customization_name\"] + \".metadata.UUID\") vnf_config[\"vnf_customization_id\"] = ( onap_utils.get_template_param(", "vnf_type + \".metadata.vfModuleModelUUID\") self.vnf_config[elt].update(vf_config) def set_onap_components(self): \"\"\" Set ONAP component", "self.__logger.debug(\"SDNC preload answer: %s\", sdnc_preload) vnf_preload_infos[elt] = ({\"sdnc_payload\": sdnc_payload, \"sdnc_preload\":", "Clean VNF SDNC preload \"\"\" self.__logger.info(\" Clean Preload of %s", "nbi import onap_tests.utils.stack_checker as sc import onap_tests.utils.utils as onap_utils PROXY", "instance_info: dict including the instance_id, the request_info and the service", "self.__logger.info(\"Start the instantiation of the VNF\") instance_info = self.create_service_instance() service_ok", "check_vnf = True except Exception: # pylint: disable=broad-except self.__logger.error(\"Impossible to", "module_instance, 'module_instance_name': module_instance_name, 'module_payload': module_payload, 'module_model_info': module_model_info, 'module_related_instance': module_related_instance}) self.__logger.info(\"SO", "+ self.vnf_config['random_string']) vnf_topology_identifier = { \"generic-vnf-name\": vnf_name, \"generic-vnf-type\": ( self.vnf_config[elt]['vnf_generic_type']),", "vnf Instance %s \", elt) self.components[\"so\"].delete_vnf( self.service_infos[\"instance_id\"], self.vnf_infos[elt][\"vnf_id\"], self.vnf_infos[elt][\"vnf_payload\"]) def", "set_onap_components(self): \"\"\" Set ONAP component objects \"\"\" self.components[\"aai\"] = aai.Aai(PROXY,", "The yaml template is available and stored in the template", "in SO\") self.__logger.info(\"********************************\") request_info = self.components[\"so\"].get_request_info( self.vnf_config[\"vnf\"] + \"-service-instance-\" +", "service_payload = self.components[\"so\"].get_service_payload( self.vnf_config[\"vnf\"], request_info, model_info) instance_id = self.components[\"so\"].create_instance( service_payload)", "vnf_config[\"vnf_version_id\"] = onap_utils.get_template_param( self.vnf_config[\"vnf\"], \"topology_template.node_templates.\" + vnf_config[\"vnf_customization_name\"] + \".metadata.UUID\") vnf_config[\"vnf_customization_id\"]", "if not self.components[\"aai\"].check_module_instance( vnf_info[\"vnf_id\"], module_ref[\"requestReferences\"][\"instanceId\"]): module_ok = False break else:", "Service instance in SO\") self.__logger.info(\"********************************\") request_info = self.components[\"so\"].get_request_info( self.vnf_config[\"vnf\"] +", "onap_tests.utils.stack_checker as sc import onap_tests.utils.utils as onap_utils PROXY = 
onap_utils.get_config(\"general.proxy\")", "# if 1 of the expected preload clean is FAIL", "VNF with ONAP * Create the service instance (SO) *", "else: # check VNF using OpenStack directly check_vnf = self.check_vnf(", "= True module_ref = module_info['module_instance'] if not self.components[\"aai\"].check_module_instance( vnf_info[\"vnf_id\"], module_ref[\"requestReferences\"][\"instanceId\"]):", "in self.vnf_infos['list']: vnf_id = self.vnf_infos[elt][\"vnf_id\"] module_id = (self.module_infos[elt][\"module_instance\"] [\"requestReferences\"][\"instanceId\"]) self.clean_module(elt)", "self.vnf_config['random_string']) request_info = self.components[\"so\"].get_request_info( vnf_instance_name) vnf_payload = self.components[\"so\"].get_vnf_payload( self.vnf_config[\"vnf\"], request_info,", "check VNF using OpenStack directly check_vnf = self.check_vnf( self.module_infos[elt][\"module_instance_name\"]) if", "onap_utils.get_config(\"general.proxy\") class Solution(object): \"\"\" VNF: Class to automate the instantiation", "onap_utils.get_template_param( self.vnf_config[\"vnf\"], \"topology_template.node_templates.\" + vnf_config[\"vnf_customization_name\"] + \".metadata.name\") vnf_config[\"vnf_invariant_id\"] = onap_utils.get_template_param(", "id of the VNF * module_id: the VF module id", "id of the VNF * vnf_id:The VNF id of the", "self.vnf_config[elt]['vnf_parameters'], vnf_topology_identifier) self.__logger.info(\"SDNC preload payload %s\", sdnc_payload) sdnc_preload = self.components[\"sdnc\"].preload(sdnc_payload)", "vnf_payload, \"vnf_related_instance\": vnf_related_instance} self.__logger.info(\">>>> SO vnf instance created %s\", vnf_info)", "False break else: # preload VNF(s) in SDNC self.preload(elt) time.sleep(10)", "= module_info return module_info def check_vnf(self, stack_name): \"\"\" Check VNF", "module variables from the config file \"\"\" for elt in", "Create the VF module instance (SO) \"\"\" instance_info = {\"instance_id\":", "onap_utils.get_template_param( self.vnf_config[\"vnf\"], \"metadata.UUID\") def set_vnf_var(self): \"\"\" set vnf variables from", "self.clean_preload(elt) return True def create_service_instance(self): \"\"\" Create service instance 2", "( self.components[\"nbi\"].get_nbi_service_order_payload()) nbi_info = self.components[\"nbi\"].create_service_order_nbi( service_payload) time.sleep(5) instance_id = (", "module_info, \"check_heat\": check_vnf} def clean(self): \"\"\" Clean VNF from ONAP", "request_info = self.components[\"so\"].get_request_info( self.vnf_config[\"vnf\"] + \"-service-instance-\" + self.vnf_config['random_string']) service_payload =", "= ( {'module_instance': module_instance, 'module_instance_name': module_instance_name, 'module_payload': module_payload, 'module_model_info': module_model_info,", "\"\"\" Clean VNF Module Args: * instance_id: The service instance", "( onap_utils.random_string_generator()) self.vnf_config[\"sdnc_vnf_name\"] = ( onap_utils.get_config(\"onap.service.name\") + \"_\" + kwargs[\"case\"]", "VNF %s info\", elt) vnf_config[\"vnf_customization_name\"] = elt vnf_config[\"vnf_model_name\"] = onap_utils.get_template_param(", "config file \"\"\" for i, elt in enumerate(self.vnf_infos['list']): vnf_config =", "self.vnf_config[elt]['sdnc_vnf_type'], self.vnf_config[elt]['module_customization_id'], self.vnf_config[elt]['module_version_id']) module_related_instance = ( self.components[\"so\"].get_module_related_instance( self.vnf_infos[elt][\"vnf_id\"], self.vnf_config[elt]['vnf_invariant_id'], 
self.vnf_config[elt]['vnf_version_id'],", "def set_module_var(self): \"\"\" set module variables from the config file", "[\"requestReferences\"][\"instanceId\"]) self.clean_module(elt) if not self.components[\"aai\"].check_module_cleaned(vnf_id, module_id): return False else: self.clean_vnf(elt)", "\", \"_\") + (\"_\") + self.vnf_config['random_string']) request_info = self.components[\"so\"].get_request_info( vnf_instance_name)", "vnf_id:The VNF id of the VNF * module_id: the VF", "NBI\") self.__logger.info(\"***********************************\") request_info = self.components[\"nbi\"].get_request_info() service_payload = ( self.components[\"nbi\"].get_nbi_service_order_payload()) nbi_info", "sc import onap_tests.utils.utils as onap_utils PROXY = onap_utils.get_config(\"general.proxy\") class Solution(object):", "the VF module instance \"\"\" instance_id = self.service_infos['instance_id'] for elt", "vnf_ok: # create VF module(s) for elt in self.vnf_infos['list']: module_info", "instance * with SO * with NBI \"\"\" instance_id =", "self.module_infos[elt][\"module_instance_name\"], self.vnf_config[elt]['sdnc_vnf_type']) return clean_preload def get_info(self): \"\"\" Get VNFs Info", "sdnc module name shall be given if \"sdnc_vnf_name\" in kwargs:", "has been properly started \"\"\" check_vnf = False try: my_stack_checker", "properly started \"\"\" check_vnf = False try: my_stack_checker = sc.StackChecker()", "= {} module_ref = {\"instanceId\": \"\"} module_ok = False check_vnf", "'module_related_instance': module_related_instance}) self.__logger.info(\"SO module vf(s) created: %s\", module_info) self.module_infos[elt] =", "the modules are in teh same order # than the", "self.components[\"aai\"] = aai.Aai(PROXY, self.__logger) self.components[\"so\"] = so.So(PROXY, self.__logger) self.components[\"sdnc\"] =", "elt) if not self.components[\"aai\"].check_vnf_instance( vnf_info[\"vnf_id\"]): vnf_ok = False break else:", "time.sleep(10) self.clean_preload(elt) return True def create_service_instance(self): \"\"\" Create service instance", "module_ref = module_info['module_instance'] if not self.components[\"aai\"].check_module_instance( vnf_info[\"vnf_id\"], module_ref[\"requestReferences\"][\"instanceId\"]): module_ok =", "self.vnf_infos[elt][\"vnf_id\"] module_id = (self.module_infos[elt][\"module_instance\"] [\"requestReferences\"][\"instanceId\"]) module_payload = self.module_infos[elt][\"module_payload\"] self.components[\"so\"].delete_module( module_payload,", "\".metadata.vfModuleModelUUID\") self.vnf_config[elt].update(vf_config) def set_onap_components(self): \"\"\" Set ONAP component objects \"\"\"", "else: self.clean_instance(instance_id) if self.components[\"aai\"].check_service_instance_cleaned( self.vnf_config[\"vnf_name\"], instance_id): self.__logger.debug(\"Instance still in AAI", "= self.components[\"so\"].get_vnf_related_instance( self.service_infos[\"instance_id\"], self.vnf_config['invariant_uuid'], self.vnf_config['uuid']) vnf_instance_name = (self.vnf_config[\"vnf\"] + \"-vnf-instance-\"", "kwargs[\"sdnc_vnf_name\"][-6:] else: self.vnf_config[\"random_string\"] = ( onap_utils.random_string_generator()) self.vnf_config[\"sdnc_vnf_name\"] = ( onap_utils.get_config(\"onap.service.name\")", "The ID of the VNF service instance vnf_id: The ID", "self.__logger.debug(\"VNF payload: %s\", vnf_payload) vnf_id = self.components[\"so\"].create_vnf( self.service_infos[\"instance_id\"], vnf_payload) vnf_info", "+ \".metadata.UUID\") vnf_config[\"vnf_customization_id\"] = ( 
onap_utils.get_template_param( self.vnf_config[\"vnf\"], \"topology_template.node_templates.\" + vnf_config[\"vnf_customization_name\"]", "VNF in the SDNC * Create the VF module instance", "the VNF \"\"\" self.__logger.info(\" Clean Module VF Instance %s \",", "MODULE %s instance in SO\", elt) self.__logger.info(\"***************************************\") module_model_info = self.components[\"so\"].get_module_model_info(", "OpenStack directly check_vnf = self.check_vnf( self.module_infos[elt][\"module_instance_name\"]) if check_vnf: self.__logger.info(\"Stack successfully", "self.vnf_config[\"vnf\"], \"topology_template.groups\")) # Class attributes for instance, vnf and module", "self.components[\"so\"].get_request_info( module_instance_name) module_payload = self.components[\"so\"].get_module_payload( self.vnf_config[\"vnf\"], request_info, module_model_info, self.vnf_infos[elt][\"vnf_related_instance\"], module_related_instance)", "elt) # if 1 of the expected preload clean is", "self.components[\"so\"].get_service_payload( self.vnf_config[\"vnf\"], request_info, model_info) instance_id = self.components[\"so\"].create_instance( service_payload) service_instance_info =", "= kwargs[\"nbi\"] # can be useful to destroy resources, sdnc", "= {} self.vnf_infos = {'list': vnf_list} self.module_infos = {'list': vf_module_list}", "self.vnf_config[elt].update(vf_config) def set_onap_components(self): \"\"\" Set ONAP component objects \"\"\" self.components[\"aai\"]", "\"check_heat\": check_vnf} def clean(self): \"\"\" Clean VNF from ONAP Args:", "instance (SO) * Create the VNF instance (SO) * preload", "\"\"\" check_vnf = False try: my_stack_checker = sc.StackChecker() if my_stack_checker.check_stack_is_complete(stack_name):", "= onap_utils.get_template_param( self.vnf_config[\"vnf\"], \"topology_template.groups.\" + vnf_type + \".metadata.vfModuleModelInvariantUUID\") vf_config[\"module_name_version_id\"] =", "False time.sleep(10) self.clean_preload(elt) return True def create_service_instance(self): \"\"\" Create service", "module(s) for elt in self.vnf_infos['list']: module_info = self.create_module_instance(elt) module_ok =", "self.vnf_config[elt]['module_version_id']) module_related_instance = ( self.components[\"so\"].get_module_related_instance( self.vnf_infos[elt][\"vnf_id\"], self.vnf_config[elt]['vnf_invariant_id'], self.vnf_config[elt]['vnf_version_id'], self.vnf_config[elt]['vnf_model_name'], self.vnf_config[elt]['vnf_customization_id'],", "\"_\") + \"_\" + self.vnf_config['random_string']) vnf_topology_identifier = { \"generic-vnf-name\": vnf_name,", "\"\"\" vnf_preload_infos = {} self.__logger.info(\"3) Preload VNF %s in SDNC\",", "= ( self.components[\"nbi\"].get_service_instance_id_from_order( nbi_info[\"id\"])) else: self.__logger.info(\"1) Create Service instance in", "char of the the vnf name self.vnf_config[\"random_string\"] = kwargs[\"sdnc_vnf_name\"][-6:] else:", "vnf_id = self.vnf_infos[elt][\"vnf_id\"] module_id = (self.module_infos[elt][\"module_instance\"] [\"requestReferences\"][\"instanceId\"]) self.clean_module(elt) if not", "vnf name self.vnf_config[\"random_string\"] = kwargs[\"sdnc_vnf_name\"][-6:] else: self.vnf_config[\"random_string\"] = ( onap_utils.random_string_generator())", "{'module_instance': module_instance, 'module_instance_name': module_instance_name, 'module_payload': module_payload, 'module_model_info': module_model_info, 'module_related_instance': module_related_instance})", "\"topology_template.groups\"))[vf_index] self.__logger.info(\"Complete 
Module info for VNF %s\", elt) vf_config[\"sdnc_vnf_type\"] =", "# # http://www.apache.org/licenses/LICENSE-2.0 # # pylint: disable=missing-docstring # pylint: disable=duplicate-code", "useful to destroy resources, sdnc module name shall be given", "Clean VNF from ONAP Args: instance_id: The ID of the", "except Exception: # pylint: disable=broad-except self.__logger.error(\"Impossible to find the stack", "# Random part = 6 last char of the the", "preload the VNF in the SDNC * Create the VF", "Create module instance Args: * instance_info: dict including the instance_id,", "elt): \"\"\" Create module instance Args: * instance_info: dict including", "elt) vnf_type = list(onap_utils.get_template_param( self.vnf_config[\"vnf\"], \"topology_template.groups\"))[vf_index] self.__logger.info(\"Complete Module info for", "AAI DB\") else: return False time.sleep(10) self.clean_preload(elt) return True def", "str(elt).replace(\" \", \"_\") + \"_\" + self.vnf_config['random_string']) request_info = self.components[\"so\"].get_request_info(", "= self.components[\"so\"].get_request_info( module_instance_name) module_payload = self.components[\"so\"].get_module_payload( self.vnf_config[\"vnf\"], request_info, module_model_info, self.vnf_infos[elt][\"vnf_related_instance\"],", "\"topology_template.groups.\" + vnf_type + \".metadata.vfModuleModelUUID\")) vf_config[\"module_customization_id\"] = ( onap_utils.get_template_param( self.vnf_config[\"vnf\"],", "onap_utils.get_template_param( self.vnf_config[\"vnf\"], \"topology_template.groups.\" + vnf_type + \".metadata.vfModuleModelUUID\") self.vnf_config[elt].update(vf_config) def set_onap_components(self):", "the VNF\") instance_info = self.create_service_instance() service_ok = self.components[\"aai\"].check_service_instance( self.vnf_config[\"vnf_name\"], instance_info[\"instance_id\"])", "{} self.vnf_infos = {'list': vnf_list} self.module_infos = {'list': vf_module_list} #", "Create Service instance in SO\") self.__logger.info(\"********************************\") request_info = self.components[\"so\"].get_request_info( self.vnf_config[\"vnf\"]", "self.__logger.error(\"Impossible to find the stack %s in OpenStack\", stack_name) return", "the instance * with SO * with NBI \"\"\" instance_id", "module_ok = False check_vnf = False self.__logger.info(\"Start the instantiation of", "+ (\"_\") + self.vnf_config['random_string']) request_info = self.components[\"so\"].get_request_info( vnf_instance_name) vnf_payload =", "service_payload) service_instance_info = {\"instance_id\": instance_id, \"request_info\": request_info, \"service_payload\": service_payload} self.__logger.info(\"Service", "= False self.__logger.info(\"Start the instantiation of the VNF\") instance_info =", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # pylint: disable=missing-docstring # pylint:", "def create_service_instance(self): \"\"\" Create service instance 2 options to create", "the configuration files self.set_service_instance_var() self.set_vnf_var() self.set_module_var() self.set_onap_components() def set_service_instance_var(self): \"\"\"", "def set_service_instance_var(self): \"\"\" set service instance variables from the config", "self.vnf_config[elt]['vnf_customization_id'], self.vnf_config[elt]['vnf_customization_name']) vnf_related_instance = self.components[\"so\"].get_vnf_related_instance( self.service_infos[\"instance_id\"], self.vnf_config['invariant_uuid'], self.vnf_config['uuid']) vnf_instance_name =", "in enumerate(self.vnf_infos['list']): vnf_config = {} 
self.__logger.info(\"get VNF %s info\", elt)", "= 6 last char of the the vnf name self.vnf_config[\"random_string\"]", "* module_id: the VF module id of the VNF \"\"\"", "stack_name): \"\"\" Check VNF stack has been properly started \"\"\"", "kwargs[\"case\"] + \"_\" + self.vnf_config[\"random_string\"]) vnf_list = list(onap_utils.get_template_param( self.vnf_config[\"vnf\"], \"topology_template.node_templates\"))", "(self.vnf_config[\"vnf\"] + \"-vfmodule-instance-\" + str(elt).replace(\" \", \"_\") + \"_\" +", "created: %s\", module_info) self.module_infos[elt] = module_info return module_info def check_vnf(self,", "{'list': vf_module_list} # retrieve infos from the configuration files self.set_service_instance_var()", "instantiate(self): \"\"\" Instantiate a VNF with ONAP * Create the", "self.vnf_config[\"vnf\"] + \"-service-instance-\" + self.vnf_config['random_string']) service_payload = self.components[\"so\"].get_service_payload( self.vnf_config[\"vnf\"], request_info,", "self.__logger.info(\" Clean Preload of %s \", elt) # if 1", "\"\"\" Create service instance 2 options to create the instance", "vnf_ok = False break else: # preload VNF(s) in SDNC", "self.service_infos = service_instance_info return service_instance_info def create_vnf_instance(self, elt): \"\"\" Create", "= self.components[\"so\"].get_request_info( vnf_instance_name) vnf_payload = self.components[\"so\"].get_vnf_payload( self.vnf_config[\"vnf\"], request_info, model_info, vnf_related_instance)", "if \"nbi\" in kwargs: self.vnf_config[\"nbi\"] = kwargs[\"nbi\"] # can be", "VNF instance module_id: The ID of the VF module instance", "( {'module_instance': module_instance, 'module_instance_name': module_instance_name, 'module_payload': module_payload, 'module_model_info': module_model_info, 'module_related_instance':", "request_info = self.components[\"nbi\"].get_request_info() service_payload = ( self.components[\"nbi\"].get_nbi_service_order_payload()) nbi_info = self.components[\"nbi\"].create_service_order_nbi(", "VNF using OpenStack directly check_vnf = self.check_vnf( self.module_infos[elt][\"module_instance_name\"]) if check_vnf:", "instance in SO\") self.__logger.info(\"****************************\") model_info = self.components[\"so\"].get_vnf_model_info( self.vnf_config[elt]['vnf_invariant_id'], self.vnf_config[elt]['vnf_version_id'], self.vnf_config[elt]['vnf_model_name'],", "Create MODULE %s instance in SO\", elt) self.__logger.info(\"***************************************\") module_model_info =", "instance_id = self.service_infos['instance_id'] for elt in self.vnf_infos['list']: vnf_id = self.vnf_infos[elt][\"vnf_id\"]", "self.__logger.info(\"Stack successfully checked\") return {\"status\": module_ok, \"instance_id\": instance_info, \"vnf_info\": vnf_info,", "self.service_infos[\"instance_id\"] vnf_id = self.vnf_infos[elt][\"vnf_id\"] module_id = (self.module_infos[elt][\"module_instance\"] [\"requestReferences\"][\"instanceId\"]) module_payload =", "( self.components[\"nbi\"].get_service_instance_id_from_order( nbi_info[\"id\"])) else: self.__logger.info(\"1) Create Service instance in SO\")", "\".\" + str(elt) + \".vnf_parameters\") vf_config[\"vnf_parameters\"] = onap_utils.get_config(vnf_param) vf_config[\"module_invariant_id\"] =", "instance created: %s\", module_instance) module_info = ( {'module_instance': module_instance, 'module_instance_name':", "Solution object.\"\"\" super(Solution, self).__init__() self.vnf_config = {} self.components = {}", "set_vnf_var(self): \"\"\" set vnf variables 
from the config file \"\"\"", "self.__logger.info(\"***************************************\") module_model_info = self.components[\"so\"].get_module_model_info( self.vnf_config[elt]['module_invariant_id'], self.vnf_config[elt]['module_name_version_id'], self.vnf_config[elt]['sdnc_vnf_type'], self.vnf_config[elt]['module_customization_id'], self.vnf_config[elt]['module_version_id']) module_related_instance", "\") service_payload = self.components[\"so\"].get_service_payload( self.vnf_config[\"vnf\"], self.components[\"so\"].get_request_info( self.vnf_config['sdnc_vnf_name']), self.components[\"so\"].get_service_model_info( self.vnf_config['invariant_uuid'], self.vnf_config['uuid']))", "check_vnf: self.__logger.info(\"Stack successfully checked\") return {\"status\": module_ok, \"instance_id\": instance_info, \"vnf_info\":", "\", elt) # if 1 of the expected preload clean", "self.vnf_infos[elt] = vnf_info return vnf_info def preload(self, elt): \"\"\" Preload", "onap_tests.components.nbi as nbi import onap_tests.utils.stack_checker as sc import onap_tests.utils.utils as", "self.vnf_config['uuid'])) self.components[\"so\"].delete_instance(instance_id, service_payload) def clean_vnf(self, elt): \"\"\" Clean VNF Args:", "This program and the accompanying materials # are made available", "break else: # preload VNF(s) in SDNC self.preload(elt) time.sleep(10) if", "instance_info = self.create_service_instance() service_ok = self.components[\"aai\"].check_service_instance( self.vnf_config[\"vnf_name\"], instance_info[\"instance_id\"]) if service_ok:", "+ \"_\" + self.vnf_config['random_string']) request_info = self.components[\"so\"].get_request_info( module_instance_name) module_payload =", "+ vnf_config[\"vnf_customization_name\"] + \".metadata.UUID\") vnf_config[\"vnf_customization_id\"] = ( onap_utils.get_template_param( self.vnf_config[\"vnf\"], \"topology_template.node_templates.\"", "self.__logger.info(\">>>> Module instance created: %s\", module_instance) module_info = ( {'module_instance':", "{\"instance_id\": instance_id, \"request_info\": request_info, \"service_payload\": service_payload} self.__logger.info(\"Service instance created: %s\",", "sdnc_payload) sdnc_preload = self.components[\"sdnc\"].preload(sdnc_payload) self.__logger.debug(\"SDNC preload answer: %s\", sdnc_preload) vnf_preload_infos[elt]", "elt in self.vnf_infos['list']: clean_preload = self.components[\"sdnc\"].delete_preload( self.module_infos[elt][\"module_instance_name\"], self.vnf_config[elt]['sdnc_vnf_type']) return clean_preload", "the VNF * vnf_id:The VNF id of the VNF \"\"\"", "return False else: self.clean_vnf(elt) if not self.components[\"aai\"].check_vnf_cleaned(vnf_id): return False else:", "instance in SO\") self.__logger.info(\"********************************\") request_info = self.components[\"so\"].get_request_info( self.vnf_config[\"vnf\"] + \"-service-instance-\"", "VNF instance Args: * instance_id: The service instance of the", "= {\"vnf_id\": \"\"} module_info = {} module_ref = {\"instanceId\": \"\"}", "SDNC Args: * elt: the VNF \"\"\" vnf_preload_infos = {}", "options to create the instance * with SO * with", "create VF module(s) for elt in self.vnf_infos['list']: module_info = self.create_module_instance(elt)", "instance created %s\", vnf_info) self.vnf_infos[elt] = vnf_info return vnf_info def", "Class attributes for instance, vnf and module VF self.service_infos =", "vnf_related_instance} self.__logger.info(\">>>> SO vnf instance created %s\", vnf_info) self.vnf_infos[elt] =", "order # than the 
vnf vf_index = onap_utils.get_vf_module_index( self.module_infos['list'], elt)", "VNF SDNC preload with the preload id \"\"\" self.__logger.info(\" Clean", "self.components[\"so\"].get_vnf_model_info( self.vnf_config[elt]['vnf_invariant_id'], self.vnf_config[elt]['vnf_version_id'], self.vnf_config[elt]['vnf_model_name'], self.vnf_config[elt]['vnf_customization_id'], self.vnf_config[elt]['vnf_customization_name']) vnf_related_instance = self.components[\"so\"].get_vnf_related_instance( self.service_infos[\"instance_id\"],", "VF module instance (SO) \"\"\" instance_info = {\"instance_id\": \"\"} vnf_info", "def create_module_instance(self, elt): \"\"\" Create module instance Args: * instance_info:", "instance created: %s\", service_instance_info) self.service_infos = service_instance_info return service_instance_info def", "payload \"\"\" module_info = {} self.__logger.info(\"4) Create MODULE %s instance", "vnf_name, \"vnf-type\": self.vnf_config[elt]['sdnc_vnf_type']} sdnc_payload = self.components[\"sdnc\"].get_preload_payload( self.vnf_config[elt]['vnf_parameters'], vnf_topology_identifier) self.__logger.info(\"SDNC preload", "elt vnf_config[\"vnf_model_name\"] = onap_utils.get_template_param( self.vnf_config[\"vnf\"], \"topology_template.node_templates.\" + vnf_config[\"vnf_customization_name\"] + \".metadata.name\")", "+ str(elt) + \".vnf_parameters\") vf_config[\"vnf_parameters\"] = onap_utils.get_config(vnf_param) vf_config[\"module_invariant_id\"] = onap_utils.get_template_param(", "Clean Preload \") for elt in self.vnf_infos['list']: clean_preload = self.components[\"sdnc\"].delete_preload(", "in SDNC Args: * elt: the VNF \"\"\" vnf_preload_infos =", "elt) self.components[\"so\"].delete_vnf( self.service_infos[\"instance_id\"], self.vnf_infos[elt][\"vnf_id\"], self.vnf_infos[elt][\"vnf_payload\"]) def clean_module(self, elt): \"\"\" Clean", "= kwargs[\"sdnc_vnf_name\"][-6:] else: self.vnf_config[\"random_string\"] = ( onap_utils.random_string_generator()) self.vnf_config[\"sdnc_vnf_name\"] = (", "self.components[\"sdnc\"].get_preload_payload( self.vnf_config[elt]['vnf_parameters'], vnf_topology_identifier) self.__logger.info(\"SDNC preload payload %s\", sdnc_payload) sdnc_preload =", "self.vnf_config['invariant_uuid'], self.vnf_config['uuid']) vnf_instance_name = (self.vnf_config[\"vnf\"] + \"-vnf-instance-\" + str(elt).replace(\" \",", "onap_utils.get_template_param( self.vnf_config[\"vnf\"], \"topology_template.node_templates.\" + vnf_config[\"vnf_customization_name\"] + \".metadata.UUID\") vnf_config[\"vnf_customization_id\"] = (", "vf(s) created: %s\", module_info) self.module_infos[elt] = module_info return module_info def", "clean is FAIL we return False clean_preload = self.components[\"sdnc\"].delete_preload( self.module_infos[elt][\"module_instance_name\"],", "= vnf_config def set_module_var(self): \"\"\" set module variables from the", "onap_utils PROXY = onap_utils.get_config(\"general.proxy\") class Solution(object): \"\"\" VNF: Class to", "instance vnf_id: The ID of the VNF instance module_id: The", "info\", elt) vnf_config[\"vnf_customization_name\"] = elt vnf_config[\"vnf_model_name\"] = onap_utils.get_template_param( self.vnf_config[\"vnf\"], \"topology_template.node_templates.\"", "instance (SO) * preload the VNF in the SDNC *", "as so import onap_tests.components.sdnc as sdnc import onap_tests.components.nbi as nbi", "= self.components[\"so\"].get_vnf_payload( self.vnf_config[\"vnf\"], request_info, model_info, vnf_related_instance) # self.__logger.debug(\"VNF 
payload: %s\",", "self.clean_module(elt) if not self.components[\"aai\"].check_module_cleaned(vnf_id, module_id): return False else: self.clean_vnf(elt) if", "vf_config[\"module_name_version_id\"] = ( onap_utils.get_template_param( self.vnf_config[\"vnf\"], \"topology_template.groups.\" + vnf_type + \".metadata.vfModuleModelUUID\"))", "def preload(self, elt): \"\"\" Preload VNF in SDNC Args: *", "instance_id: The service instance of the VNF \"\"\" self.__logger.info(\" Clean", "configuration files self.set_service_instance_var() self.set_vnf_var() self.set_module_var() self.set_onap_components() def set_service_instance_var(self): \"\"\" set", "vf_module_list} # retrieve infos from the configuration files self.set_service_instance_var() self.set_vnf_var()", "VNF * vnf_id:The VNF id of the VNF * module_id:", "ONAP component objects \"\"\" self.components[\"aai\"] = aai.Aai(PROXY, self.__logger) self.components[\"so\"] =", "\"_\" + kwargs[\"case\"] + \"_\" + self.vnf_config[\"random_string\"]) vnf_list = list(onap_utils.get_template_param(", "= module_info['module_instance'] if not self.components[\"aai\"].check_module_instance( vnf_info[\"vnf_id\"], module_ref[\"requestReferences\"][\"instanceId\"]): module_ok = False", "+ \"_\" + self.vnf_config[\"random_string\"]) vnf_list = list(onap_utils.get_template_param( self.vnf_config[\"vnf\"], \"topology_template.node_templates\")) vf_module_list", "= self.vnf_infos[elt][\"vnf_id\"] module_id = (self.module_infos[elt][\"module_instance\"] [\"requestReferences\"][\"instanceId\"]) self.clean_module(elt) if not self.components[\"aai\"].check_module_cleaned(vnf_id,", "VNF service instance vnf_id: The ID of the VNF instance", "VNF is not precised we set mrf kwargs[\"case\"] = \"mrf\"", "Check VNF stack has been properly started \"\"\" check_vnf =", "onap_utils.get_vf_module_index( self.module_infos['list'], elt) vnf_type = list(onap_utils.get_template_param( self.vnf_config[\"vnf\"], \"topology_template.groups\"))[vf_index] self.__logger.info(\"Complete Module", "\"topology_template.groups.\" + vnf_type + \".metadata.vfModuleModelInvariantUUID\") vf_config[\"module_name_version_id\"] = ( onap_utils.get_template_param( self.vnf_config[\"vnf\"],", "\".metadata.vfModuleModelName\") vnf_param = (self.vnf_config[\"vnf\"] + \".\" + str(elt) + \".vnf_parameters\")", "(self.vnf_config[\"vnf\"] + \".\" + str(elt) + \".vnf_parameters\") vf_config[\"vnf_parameters\"] = onap_utils.get_config(vnf_param)", "be given if \"sdnc_vnf_name\" in kwargs: self.vnf_config[\"sdnc_vnf_name\"] = kwargs[\"sdnc_vnf_name\"] #", "self.vnf_config[elt]['sdnc_vnf_type']} sdnc_payload = self.components[\"sdnc\"].get_preload_payload( self.vnf_config[elt]['vnf_parameters'], vnf_topology_identifier) self.__logger.info(\"SDNC preload payload %s\",", "self.components[\"aai\"].check_module_instance( vnf_info[\"vnf_id\"], module_ref[\"requestReferences\"][\"instanceId\"]): module_ok = False break else: # check", "done The yaml template is available and stored in the", "= {'list': vf_module_list} # retrieve infos from the configuration files", "aai import onap_tests.components.so as so import onap_tests.components.sdnc as sdnc import", "created: %s\", module_instance) module_info = ( {'module_instance': module_instance, 'module_instance_name': module_instance_name,", "* vnf_id:The VNF id of the VNF * module_id: the", "we set mrf kwargs[\"case\"] = \"mrf\" self.vnf_config[\"vnf\"] = kwargs[\"case\"] if", "module_instance_name = (self.vnf_config[\"vnf\"] + \"-vfmodule-instance-\" + 
str(elt).replace(\" \", \"_\") +", "Apache License, Version 2.0 # which accompanies this distribution, and", "\"\"\" instance_id = None model_info = self.components[\"so\"].get_service_model_info( self.vnf_config['invariant_uuid'], self.vnf_config['uuid']) if", "as aai import onap_tests.components.so as so import onap_tests.components.sdnc as sdnc", "False else: self.clean_instance(instance_id) if self.components[\"aai\"].check_service_instance_cleaned( self.vnf_config[\"vnf_name\"], instance_id): self.__logger.debug(\"Instance still in", "VNFs Info \"\"\" self.__logger.info(\"Class to manage VNFs\") self.__logger.info(\"VNF config: %s\",", "so import onap_tests.components.sdnc as sdnc import onap_tests.components.nbi as nbi import", "module_payload, instance_id, vnf_id, module_id) def clean_preload(self, elt): \"\"\" Clean VNF", "vnf_id:The VNF id of the VNF \"\"\" self.__logger.info(\" Clean vnf", "\"topology_template.node_templates.\" + vnf_config[\"vnf_customization_name\"] + \".metadata.invariantUUID\") vnf_config[\"vnf_version_id\"] = onap_utils.get_template_param( self.vnf_config[\"vnf\"], \"topology_template.node_templates.\"", "of the Apache License, Version 2.0 # which accompanies this", "\"\"} vnf_info = {\"vnf_id\": \"\"} module_info = {} module_ref =", "instance_id): self.__logger.debug(\"Instance still in AAI DB\") else: return False time.sleep(10)", "this distribution, and is available at # # http://www.apache.org/licenses/LICENSE-2.0 #", "def set_onap_components(self): \"\"\" Set ONAP component objects \"\"\" self.components[\"aai\"] =", "onap_tests.components.sdnc as sdnc import onap_tests.components.nbi as nbi import onap_tests.utils.stack_checker as", "= {\"instanceId\": \"\"} module_ok = False check_vnf = False self.__logger.info(\"Start", "module_ref[\"requestReferences\"][\"instanceId\"]): module_ok = False break else: # check VNF using", "module_payload = self.module_infos[elt][\"module_payload\"] self.components[\"so\"].delete_module( module_payload, instance_id, vnf_id, module_id) def clean_preload(self,", "VNF instance(s) for elt in self.vnf_infos['list']: vnf_info = self.create_vnf_instance(elt) self.__logger.info(\"Check", "vnf %s ....\", elt) vnf_ok = True self.__logger.info(\"Check vnf %s", "service instance 2 options to create the instance * with", "\"\"\" Instantiate a VNF with ONAP * Create the service", "request_info = self.components[\"so\"].get_request_info( vnf_instance_name) vnf_payload = self.components[\"so\"].get_vnf_payload( self.vnf_config[\"vnf\"], request_info, model_info,", "the VNF * vnf_id:The VNF id of the VNF *", "= {\"instance_id\": instance_id, \"request_info\": request_info, \"service_payload\": service_payload} self.__logger.info(\"Service instance created:", "self.vnf_infos = {'list': vnf_list} self.module_infos = {'list': vf_module_list} # retrieve", "instance Args: * elt: the VNF \"\"\" vnf_id = None", "instance_id = self.service_infos[\"instance_id\"] vnf_id = self.vnf_infos[elt][\"vnf_id\"] module_id = (self.module_infos[elt][\"module_instance\"] [\"requestReferences\"][\"instanceId\"])", "self.check_vnf( self.module_infos[elt][\"module_instance_name\"]) if check_vnf: self.__logger.info(\"Stack successfully checked\") return {\"status\": module_ok,", "self.service_infos[\"instance_id\"], self.vnf_infos[elt][\"vnf_id\"], self.vnf_infos[elt][\"vnf_payload\"]) def clean_module(self, elt): \"\"\" Clean VNF Module", "{} self.__logger.info(\"4) Create MODULE %s instance in SO\", elt) 
self.__logger.info(\"***************************************\")", "in self.vnf_infos['list']: clean_preload = self.components[\"sdnc\"].delete_preload( self.module_infos[elt][\"module_instance_name\"], self.vnf_config[elt]['sdnc_vnf_type']) return clean_preload def", "answer: %s\", sdnc_preload) vnf_preload_infos[elt] = ({\"sdnc_payload\": sdnc_payload, \"sdnc_preload\": sdnc_preload}) return", "service_payload) def clean_vnf(self, elt): \"\"\" Clean VNF Args: * instance_id:", "\"\"\" self.__logger.info(\" Clean Preload \") for elt in self.vnf_infos['list']: clean_preload", "* with NBI \"\"\" instance_id = None model_info = self.components[\"so\"].get_service_model_info(", "variables from the config file \"\"\" self.vnf_config[\"vnf_name\"] = onap_utils.get_template_param( self.vnf_config[\"vnf\"],", "super(Solution, self).__init__() self.vnf_config = {} self.components = {} if \"case\"", "return module_info def check_vnf(self, stack_name): \"\"\" Check VNF stack has", "({\"sdnc_payload\": sdnc_payload, \"sdnc_preload\": sdnc_preload}) return vnf_preload_infos[elt] def create_module_instance(self, elt): \"\"\"", "\", elt) self.components[\"so\"].delete_vnf( self.service_infos[\"instance_id\"], self.vnf_infos[elt][\"vnf_id\"], self.vnf_infos[elt][\"vnf_payload\"]) def clean_module(self, elt): \"\"\"", "vnf_config[\"vnf_customization_name\"] = elt vnf_config[\"vnf_model_name\"] = onap_utils.get_template_param( self.vnf_config[\"vnf\"], \"topology_template.node_templates.\" + vnf_config[\"vnf_customization_name\"]", "sdnc_payload, \"sdnc_preload\": sdnc_preload}) return vnf_preload_infos[elt] def create_module_instance(self, elt): \"\"\" Create", "= \"mrf\" self.vnf_config[\"vnf\"] = kwargs[\"case\"] if \"nbi\" in kwargs: self.vnf_config[\"nbi\"]", "self.vnf_config['sdnc_vnf_name']), self.components[\"so\"].get_service_model_info( self.vnf_config['invariant_uuid'], self.vnf_config['uuid'])) self.components[\"so\"].delete_instance(instance_id, service_payload) def clean_vnf(self, elt): \"\"\"", "module_info = {} module_ref = {\"instanceId\": \"\"} module_ok = False", "preload with the preload id \"\"\" self.__logger.info(\" Clean Preload \")", "# can be useful to destroy resources, sdnc module name", "service instance vnf_id: The ID of the VNF instance module_id:", "module_id: the VF module id of the VNF \"\"\" self.__logger.info(\"", "self.vnf_config[\"vnf\"], request_info, model_info, vnf_related_instance) # self.__logger.debug(\"VNF payload: %s\", vnf_payload) vnf_id", "\"\"\" VNF: Class to automate the instantiation of a VNF", "= True except Exception: # pylint: disable=broad-except self.__logger.error(\"Impossible to find", "= (self.vnf_config[\"vnf\"] + \"-vfmodule-instance-\" + str(elt).replace(\" \", \"_\") + \"_\"", "+ \".metadata.vfModuleModelCustomizationUUID\")) vf_config[\"module_version_id\"] = onap_utils.get_template_param( self.vnf_config[\"vnf\"], \"topology_template.groups.\" + vnf_type +", "the VNF instance module_id: The ID of the VF module", "self.vnf_config = {} self.components = {} if \"case\" not in", "elt) vnf_config[\"vnf_customization_name\"] = elt vnf_config[\"vnf_model_name\"] = onap_utils.get_template_param( self.vnf_config[\"vnf\"], \"topology_template.node_templates.\" +", "return service_instance_info def create_vnf_instance(self, elt): \"\"\" Create VNF instance Args:", "elt) vnf_ok = True self.__logger.info(\"Check vnf %s ....\", elt) if", "self.vnf_infos['list']: vnf_info = self.create_vnf_instance(elt) self.__logger.info(\"Check vnf %s ....\", elt) vnf_ok", 
"service_payload) time.sleep(5) instance_id = ( self.components[\"nbi\"].get_service_instance_id_from_order( nbi_info[\"id\"])) else: self.__logger.info(\"1) Create", "module_id) def clean_preload(self, elt): \"\"\" Clean VNF SDNC preload \"\"\"", "VNF \"\"\" vnf_id = None self.__logger.info(\"2) Create VNF instance in", "self.components[\"so\"].create_module( self.service_infos[\"instance_id\"], self.vnf_infos[elt][\"vnf_id\"], module_payload) self.__logger.info(\">>>> Module instance created: %s\", module_instance)", "self.__logger.info(\" Clean vnf Instance %s \", elt) self.components[\"so\"].delete_vnf( self.service_infos[\"instance_id\"], self.vnf_infos[elt][\"vnf_id\"],", "of the VNF * vnf_id:The VNF id of the VNF", "service instance of the VNF \"\"\" self.__logger.info(\" Clean Service Instance", "SDNC * Create the VF module instance (SO) \"\"\" instance_info", "self.vnf_config[\"vnf\"], request_info, module_model_info, self.vnf_infos[elt][\"vnf_related_instance\"], module_related_instance) self.__logger.debug(\"Module payload %s\", module_payload) module_instance", "\".metadata.vfModuleModelInvariantUUID\") vf_config[\"module_name_version_id\"] = ( onap_utils.get_template_param( self.vnf_config[\"vnf\"], \"topology_template.groups.\" + vnf_type +", "the instantiation of a VNF It is assumed that the", "instance Args: * instance_id: The service instance of the VNF", "materials # are made available under the terms of the", "+ \".metadata.invariantUUID\") vnf_config[\"vnf_version_id\"] = onap_utils.get_template_param( self.vnf_config[\"vnf\"], \"topology_template.node_templates.\" + vnf_config[\"vnf_customization_name\"] +", "of the VNF\") instance_info = self.create_service_instance() service_ok = self.components[\"aai\"].check_service_instance( self.vnf_config[\"vnf_name\"],", "= {} self.__logger.info(\"get VNF %s info\", elt) vnf_config[\"vnf_customization_name\"] = elt", "= True self.__logger.info(\"Check vnf %s ....\", elt) if not self.components[\"aai\"].check_vnf_instance(", "self.vnf_config['invariant_uuid'], self.vnf_config['uuid'])) self.components[\"so\"].delete_instance(instance_id, service_payload) def clean_vnf(self, elt): \"\"\" Clean VNF", "variables from the config file \"\"\" for i, elt in", "of the VNF \"\"\" self.__logger.info(\" Clean vnf Instance %s \",", "self.__logger) self.components[\"nbi\"] = nbi.Nbi(PROXY, self.__logger) def instantiate(self): \"\"\" Instantiate a", "from NBI\") self.__logger.info(\"***********************************\") request_info = self.components[\"nbi\"].get_request_info() service_payload = ( self.components[\"nbi\"].get_nbi_service_order_payload())", "\".metadata.invariantUUID\") vnf_config[\"vnf_version_id\"] = onap_utils.get_template_param( self.vnf_config[\"vnf\"], \"topology_template.node_templates.\" + vnf_config[\"vnf_customization_name\"] + \".metadata.UUID\")", "clean_preload = self.components[\"sdnc\"].delete_preload( self.module_infos[elt][\"module_instance_name\"], self.vnf_config[elt][\"sdnc_vnf_type\"]) return clean_preload def clean_all_preload(self): \"\"\"", "\"\"\" module_info = {} self.__logger.info(\"4) Create MODULE %s instance in", "vnf_related_instance and the vnf payload \"\"\" module_info = {} self.__logger.info(\"4)", "Preload \") for elt in self.vnf_infos['list']: clean_preload = self.components[\"sdnc\"].delete_preload( self.module_infos[elt][\"module_instance_name\"],", "Create service instance 2 options to create the instance *", "Clean Preload of %s \", elt) # if 1 of", "elt) vf_config[\"sdnc_vnf_type\"] 
= onap_utils.get_template_param( self.vnf_config[\"vnf\"], \"topology_template.groups.\" + vnf_type + \".metadata.vfModuleModelName\")", "self.components[\"nbi\"] = nbi.Nbi(PROXY, self.__logger) def instantiate(self): \"\"\" Instantiate a VNF", "Preload of %s \", elt) # if 1 of the", "\"/\" + vnf_config[\"vnf_customization_name\"]) self.vnf_config[elt] = vnf_config def set_module_var(self): \"\"\" set", "vnf_config[\"vnf_model_name\"] = onap_utils.get_template_param( self.vnf_config[\"vnf\"], \"topology_template.node_templates.\" + vnf_config[\"vnf_customization_name\"] + \".metadata.name\") vnf_config[\"vnf_invariant_id\"]", "self.module_infos[elt][\"module_payload\"] self.components[\"so\"].delete_module( module_payload, instance_id, vnf_id, module_id) def clean_preload(self, elt): \"\"\"", "config file \"\"\" self.vnf_config[\"vnf_name\"] = onap_utils.get_template_param( self.vnf_config[\"vnf\"], \"metadata.name\") self.vnf_config[\"invariant_uuid\"] =", "License, Version 2.0 # which accompanies this distribution, and is", "self.vnf_config[\"vnf_name\"] + \"-service-instance-\" + self.vnf_config[\"random_string\"]) vnf_config[\"vnf_generic_type\"] = ( self.vnf_config[\"vnf_name\"] +", "the the vnf name self.vnf_config[\"random_string\"] = kwargs[\"sdnc_vnf_name\"][-6:] else: self.vnf_config[\"random_string\"] =", "self.preload(elt) time.sleep(10) if vnf_ok: # create VF module(s) for elt", "Info \"\"\" self.__logger.info(\"Class to manage VNFs\") self.__logger.info(\"VNF config: %s\", self.vnf_config)", "* Create the VNF instance (SO) * preload the VNF", "instance variables from the config file \"\"\" self.vnf_config[\"vnf_name\"] = onap_utils.get_template_param(", "= False break else: # preload VNF(s) in SDNC self.preload(elt)", "def get_info(self): \"\"\" Get VNFs Info \"\"\" self.__logger.info(\"Class to manage", "%s in OpenStack\", stack_name) return check_vnf def clean_instance(self, instance_id): \"\"\"", "self.vnf_infos[elt][\"vnf_id\"] module_id = (self.module_infos[elt][\"module_instance\"] [\"requestReferences\"][\"instanceId\"]) self.clean_module(elt) if not self.components[\"aai\"].check_module_cleaned(vnf_id, module_id):", "stored in the template directory TODO: automate the design phase", "the template directory TODO: automate the design phase \"\"\" __logger", "self.vnf_config[elt][\"sdnc_vnf_type\"]) return clean_preload def clean_all_preload(self): \"\"\" Clean VNF SDNC preload", "= (self.vnf_config[\"vnf\"] + \".\" + str(elt) + \".vnf_parameters\") vf_config[\"vnf_parameters\"] =", "self.vnf_config[\"vnf\"], \"topology_template.groups\"))[i] vnf_config[\"vnf_generic_name\"] = ( self.vnf_config[\"vnf_name\"] + \"-service-instance-\" + self.vnf_config[\"random_string\"])", "vnf_payload) vnf_id = self.components[\"so\"].create_vnf( self.service_infos[\"instance_id\"], vnf_payload) vnf_info = {\"vnf_id\": vnf_id,", "onap_utils.get_template_param( self.vnf_config[\"vnf\"], \"metadata.invariantUUID\") self.vnf_config[\"uuid\"] = onap_utils.get_template_param( self.vnf_config[\"vnf\"], \"metadata.UUID\") def set_vnf_var(self):", "instantiation of a VNF It is assumed that the Design", "has been already done The yaml template is available and", "created %s\", vnf_info) self.vnf_infos[elt] = vnf_info return vnf_info def preload(self,", "onap_utils.get_template_param( self.vnf_config[\"vnf\"], \"topology_template.groups.\" + vnf_type + \".metadata.vfModuleModelCustomizationUUID\")) vf_config[\"module_version_id\"] = onap_utils.get_template_param(", "= 
self.module_infos[elt][\"module_payload\"] self.components[\"so\"].delete_module( module_payload, instance_id, vnf_id, module_id) def clean_preload(self, elt):", "# create VF module(s) for elt in self.vnf_infos['list']: module_info =", "SDNC preload \"\"\" self.__logger.info(\" Clean Preload of %s \", elt)", "FAIL we return False clean_preload = self.components[\"sdnc\"].delete_preload( self.module_infos[elt][\"module_instance_name\"], self.vnf_config[elt][\"sdnc_vnf_type\"]) return", "self.vnf_config[elt]['vnf_invariant_id'], self.vnf_config[elt]['vnf_version_id'], self.vnf_config[elt]['vnf_model_name'], self.vnf_config[elt]['vnf_customization_id'], self.vnf_config[elt]['vnf_customization_name'])) module_instance_name = (self.vnf_config[\"vnf\"] + \"-vfmodule-instance-\"", "None self.__logger.info(\"2) Create VNF instance in SO\") self.__logger.info(\"****************************\") model_info =", "service payload * vnf_info: dict including the vnf_id, vnf_related_instance and", "self.service_infos[\"instance_id\"], self.vnf_infos[elt][\"vnf_id\"], module_payload) self.__logger.info(\">>>> Module instance created: %s\", module_instance) module_info", "= self.components[\"so\"].create_vnf( self.service_infos[\"instance_id\"], vnf_payload) vnf_info = {\"vnf_id\": vnf_id, \"vnf_instance_name\": vnf_instance_name,", "vf_config[\"vnf_parameters\"] = onap_utils.get_config(vnf_param) vf_config[\"module_invariant_id\"] = onap_utils.get_template_param( self.vnf_config[\"vnf\"], \"topology_template.groups.\" + vnf_type", "elt in self.vnf_infos['list']: module_info = self.create_module_instance(elt) module_ok = True module_ref", "= self.components[\"so\"].get_service_payload( self.vnf_config[\"vnf\"], self.components[\"so\"].get_request_info( self.vnf_config['sdnc_vnf_name']), self.components[\"so\"].get_service_model_info( self.vnf_config['invariant_uuid'], self.vnf_config['uuid'])) self.components[\"so\"].delete_instance(instance_id, service_payload)", "else: self.clean_vnf(elt) if not self.components[\"aai\"].check_vnf_cleaned(vnf_id): return False else: self.clean_instance(instance_id) if", "self.vnf_infos['list']: module_info = self.create_module_instance(elt) module_ok = True module_ref = module_info['module_instance']", "( onap_utils.get_template_param( self.vnf_config[\"vnf\"], \"topology_template.groups.\" + vnf_type + \".metadata.vfModuleModelCustomizationUUID\")) vf_config[\"module_version_id\"] =", "= {} self.__logger.info(\"3) Preload VNF %s in SDNC\", elt) self.__logger.info(\"*******************************\")", "self.__logger.debug(\"Instance still in AAI DB\") else: return False time.sleep(10) self.clean_preload(elt)", "self.__logger.info(\"3) Preload VNF %s in SDNC\", elt) self.__logger.info(\"*******************************\") vnf_name =", "created: %s\", service_instance_info) self.service_infos = service_instance_info return service_instance_info def create_vnf_instance(self,", "that the Design phase has been already done The yaml", "Service instance from NBI\") self.__logger.info(\"***********************************\") request_info = self.components[\"nbi\"].get_request_info() service_payload =", "self.vnf_config[\"sdnc_vnf_name\"] = ( onap_utils.get_config(\"onap.service.name\") + \"_\" + kwargs[\"case\"] + \"_\"", "\"\"\" Create module instance Args: * instance_info: dict including the", "The service instance of the VNF * vnf_id:The VNF id", "of the VNF service instance vnf_id: The ID of the", "module_info = ( {'module_instance': module_instance, 'module_instance_name': 
module_instance_name, 'module_payload': module_payload, 'module_model_info':", "in AAI DB\") else: return False time.sleep(10) self.clean_preload(elt) return True", "vnf_id = self.vnf_infos[elt][\"vnf_id\"] module_id = (self.module_infos[elt][\"module_instance\"] [\"requestReferences\"][\"instanceId\"]) module_payload = self.module_infos[elt][\"module_payload\"]", "True def create_service_instance(self): \"\"\" Create service instance 2 options to", "= ( onap_utils.get_template_param( self.vnf_config[\"vnf\"], \"topology_template.groups.\" + vnf_type + \".metadata.vfModuleModelCustomizationUUID\")) vf_config[\"module_version_id\"]", "self.__logger) def instantiate(self): \"\"\" Instantiate a VNF with ONAP *", "**kwargs): \"\"\"Initialize Solution object.\"\"\" super(Solution, self).__init__() self.vnf_config = {} self.components", "* vnf_id:The VNF id of the VNF \"\"\" self.__logger.info(\" Clean", "payload * vnf_info: dict including the vnf_id, vnf_related_instance and the", "preload \"\"\" self.__logger.info(\" Clean Preload of %s \", elt) #", "\"-vnf-instance-\" + str(elt).replace(\" \", \"_\") + (\"_\") + self.vnf_config['random_string']) request_info", "+ vnf_type + \".metadata.vfModuleModelUUID\")) vf_config[\"module_customization_id\"] = ( onap_utils.get_template_param( self.vnf_config[\"vnf\"], \"topology_template.groups.\"", "that the modules are in teh same order # than", "self.__logger) self.components[\"sdnc\"] = sdnc.Sdnc(PROXY, self.__logger) self.components[\"nbi\"] = nbi.Nbi(PROXY, self.__logger) def", "elt): \"\"\" Clean VNF Args: * instance_id: The service instance", "instance from NBI\") self.__logger.info(\"***********************************\") request_info = self.components[\"nbi\"].get_request_info() service_payload = (", "distribution, and is available at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "vnf %s ....\", elt) if not self.components[\"aai\"].check_vnf_instance( vnf_info[\"vnf_id\"]): vnf_ok =", "module_related_instance}) self.__logger.info(\"SO module vf(s) created: %s\", module_info) self.module_infos[elt] = module_info", "checked\") return {\"status\": module_ok, \"instance_id\": instance_info, \"vnf_info\": vnf_info, \"module_info\": module_info,", "self.components[\"sdnc\"].preload(sdnc_payload) self.__logger.debug(\"SDNC preload answer: %s\", sdnc_preload) vnf_preload_infos[elt] = ({\"sdnc_payload\": sdnc_payload,", "stack_name) return check_vnf def clean_instance(self, instance_id): \"\"\" Clean VNF instance", "vnf_info: dict including the vnf_id, vnf_related_instance and the vnf payload", "vf_index = onap_utils.get_vf_module_index( self.module_infos['list'], elt) vnf_type = list(onap_utils.get_template_param( self.vnf_config[\"vnf\"], \"topology_template.groups\"))[vf_index]", "instance module_id: The ID of the VF module instance \"\"\"", "\"\"\" self.__logger.info(\" Clean Preload of %s \", elt) # if", "self.components[\"nbi\"].get_service_instance_id_from_order( nbi_info[\"id\"])) else: self.__logger.info(\"1) Create Service instance in SO\") self.__logger.info(\"********************************\")", "elt): \"\"\" Clean VNF SDNC preload \"\"\" self.__logger.info(\" Clean Preload", "if not self.components[\"aai\"].check_module_cleaned(vnf_id, module_id): return False else: self.clean_vnf(elt) if not", "Module info for VNF %s\", elt) vf_config[\"sdnc_vnf_type\"] = onap_utils.get_template_param( self.vnf_config[\"vnf\"],", "= self.components[\"so\"].create_module( self.service_infos[\"instance_id\"], self.vnf_infos[elt][\"vnf_id\"], 
module_payload) self.__logger.info(\">>>> Module instance created: %s\",", "__init__(self, **kwargs): \"\"\"Initialize Solution object.\"\"\" super(Solution, self).__init__() self.vnf_config = {}", "the instance_id, the request_info and the service payload * vnf_info:", "{\"vnf_id\": \"\"} module_info = {} module_ref = {\"instanceId\": \"\"} module_ok", "VNF id of the VNF \"\"\" self.__logger.info(\" Clean vnf Instance", "from the configuration files self.set_service_instance_var() self.set_vnf_var() self.set_module_var() self.set_onap_components() def set_service_instance_var(self):", "config file \"\"\" for elt in self.vnf_infos['list']: vf_config = {}", "\"-service-instance-\" + self.vnf_config[\"random_string\"]) vnf_config[\"vnf_generic_type\"] = ( self.vnf_config[\"vnf_name\"] + \"/\" +", "Instantiate a VNF with ONAP * Create the service instance", "not self.components[\"aai\"].check_vnf_instance( vnf_info[\"vnf_id\"]): vnf_ok = False break else: # preload", "time.sleep(5) instance_id = ( self.components[\"nbi\"].get_service_instance_id_from_order( nbi_info[\"id\"])) else: self.__logger.info(\"1) Create Service", "instance_id: The ID of the VNF service instance vnf_id: The", "set vnf variables from the config file \"\"\" for i,", "%s\", elt) vf_config[\"sdnc_vnf_type\"] = onap_utils.get_template_param( self.vnf_config[\"vnf\"], \"topology_template.groups.\" + vnf_type +", "= ( onap_utils.get_config(\"onap.service.name\") + \"_\" + kwargs[\"case\"] + \"_\" +", "self.components[\"so\"].get_module_related_instance( self.vnf_infos[elt][\"vnf_id\"], self.vnf_config[elt]['vnf_invariant_id'], self.vnf_config[elt]['vnf_version_id'], self.vnf_config[elt]['vnf_model_name'], self.vnf_config[elt]['vnf_customization_id'], self.vnf_config[elt]['vnf_customization_name'])) module_instance_name = (self.vnf_config[\"vnf\"]", "instance_info[\"instance_id\"]) if service_ok: # create VNF instance(s) for elt in", "request_info and the service payload * vnf_info: dict including the", "self.vnf_config[\"vnf\"] = kwargs[\"case\"] if \"nbi\" in kwargs: self.vnf_config[\"nbi\"] = kwargs[\"nbi\"]", "= onap_utils.get_template_param( self.vnf_config[\"vnf\"], \"topology_template.node_templates.\" + vnf_config[\"vnf_customization_name\"] + \".metadata.UUID\") vnf_config[\"vnf_customization_id\"] =", "self.vnf_config[elt]['vnf_invariant_id'], self.vnf_config[elt]['vnf_version_id'], self.vnf_config[elt]['vnf_model_name'], self.vnf_config[elt]['vnf_customization_id'], self.vnf_config[elt]['vnf_customization_name']) vnf_related_instance = self.components[\"so\"].get_vnf_related_instance( self.service_infos[\"instance_id\"], self.vnf_config['invariant_uuid'],", "stack %s in OpenStack\", stack_name) return check_vnf def clean_instance(self, instance_id):", "\"service-type\": self.service_infos[\"instance_id\"], \"vnf-name\": vnf_name, \"vnf-type\": self.vnf_config[elt]['sdnc_vnf_type']} sdnc_payload = self.components[\"sdnc\"].get_preload_payload( self.vnf_config[elt]['vnf_parameters'],", "service instance (SO) * Create the VNF instance (SO) *", "None model_info = self.components[\"so\"].get_service_model_info( self.vnf_config['invariant_uuid'], self.vnf_config['uuid']) if self.vnf_config[\"nbi\"]: self.__logger.info(\"1) Create", "logging import time import onap_tests.components.aai as aai import onap_tests.components.so as", "\"\"\" Get VNFs Info \"\"\" self.__logger.info(\"Class to manage VNFs\") self.__logger.info(\"VNF", "else: return False time.sleep(10) self.clean_preload(elt) return True def 
create_service_instance(self): \"\"\"", "ONAP * Create the service instance (SO) * Create the", "VNF Module Args: * instance_id: The service instance id of", "is VNF is not precised we set mrf kwargs[\"case\"] =", "time import onap_tests.components.aai as aai import onap_tests.components.so as so import", "self.vnf_config[\"vnf\"], \"topology_template.node_templates.\" + vnf_config[\"vnf_customization_name\"] + \".metadata.name\") vnf_config[\"vnf_invariant_id\"] = onap_utils.get_template_param( self.vnf_config[\"vnf\"],", "vnf_info) self.vnf_infos[elt] = vnf_info return vnf_info def preload(self, elt): \"\"\"", "part = 6 last char of the the vnf name", "clean_module(self, elt): \"\"\" Clean VNF Module Args: * instance_id: The", "if not self.components[\"aai\"].check_vnf_cleaned(vnf_id): return False else: self.clean_instance(instance_id) if self.components[\"aai\"].check_service_instance_cleaned( self.vnf_config[\"vnf_name\"],", "VNF in SDNC Args: * elt: the VNF \"\"\" vnf_preload_infos", "+ kwargs[\"case\"] + \"_\" + self.vnf_config[\"random_string\"]) vnf_list = list(onap_utils.get_template_param( self.vnf_config[\"vnf\"],", "= False break else: # check VNF using OpenStack directly", "Clean VNF Module Args: * instance_id: The service instance id", "the vnf payload \"\"\" module_info = {} self.__logger.info(\"4) Create MODULE", "self.vnf_config[elt]['vnf_customization_name'])) module_instance_name = (self.vnf_config[\"vnf\"] + \"-vfmodule-instance-\" + str(elt).replace(\" \", \"_\")", "\"topology_template.groups\")) # Class attributes for instance, vnf and module VF", "try: my_stack_checker = sc.StackChecker() if my_stack_checker.check_stack_is_complete(stack_name): check_vnf = True except", "pylint: disable=duplicate-code import logging import time import onap_tests.components.aai as aai", "....\", elt) vnf_ok = True self.__logger.info(\"Check vnf %s ....\", elt)", "( onap_utils.get_template_param( self.vnf_config[\"vnf\"], \"topology_template.node_templates.\" + vnf_config[\"vnf_customization_name\"] + \".metadata.customizationUUID\")) vnf_config[\"vnf_type\"] =", "( onap_utils.get_template_param( self.vnf_config[\"vnf\"], \"topology_template.groups.\" + vnf_type + \".metadata.vfModuleModelUUID\")) vf_config[\"module_customization_id\"] =", "be useful to destroy resources, sdnc module name shall be", "PROXY = onap_utils.get_config(\"general.proxy\") class Solution(object): \"\"\" VNF: Class to automate", "still in AAI DB\") else: return False time.sleep(10) self.clean_preload(elt) return", "Class to automate the instantiation of a VNF It is", "automate the instantiation of a VNF It is assumed that", "SO vnf instance created %s\", vnf_info) self.vnf_infos[elt] = vnf_info return", "of the VNF \"\"\" self.__logger.info(\" Clean Service Instance \") service_payload", "\"\"} module_info = {} module_ref = {\"instanceId\": \"\"} module_ok =", "self.components[\"so\"] = so.So(PROXY, self.__logger) self.components[\"sdnc\"] = sdnc.Sdnc(PROXY, self.__logger) self.components[\"nbi\"] =", "been properly started \"\"\" check_vnf = False try: my_stack_checker =", "\"_\" + self.vnf_config[\"random_string\"]) vnf_list = list(onap_utils.get_template_param( self.vnf_config[\"vnf\"], \"topology_template.node_templates\")) vf_module_list =", "module_id = (self.module_infos[elt][\"module_instance\"] [\"requestReferences\"][\"instanceId\"]) module_payload = self.module_infos[elt][\"module_payload\"] self.components[\"so\"].delete_module( module_payload, instance_id,", "Args: * instance_id: The service 
instance of the VNF \"\"\"", "instance (SO) \"\"\" instance_info = {\"instance_id\": \"\"} vnf_info = {\"vnf_id\":", "self.set_module_var() self.set_onap_components() def set_service_instance_var(self): \"\"\" set service instance variables from", "{} self.__logger.info(\"get VNF %s info\", elt) vnf_config[\"vnf_customization_name\"] = elt vnf_config[\"vnf_model_name\"]", "modules are in teh same order # than the vnf", "instance_id: The service instance of the VNF * vnf_id:The VNF", "False clean_preload = self.components[\"sdnc\"].delete_preload( self.module_infos[elt][\"module_instance_name\"], self.vnf_config[elt][\"sdnc_vnf_type\"]) return clean_preload def clean_all_preload(self):", "and is available at # # http://www.apache.org/licenses/LICENSE-2.0 # # pylint:", "name self.vnf_config[\"random_string\"] = kwargs[\"sdnc_vnf_name\"][-6:] else: self.vnf_config[\"random_string\"] = ( onap_utils.random_string_generator()) self.vnf_config[\"sdnc_vnf_name\"]", "+ \"-service-instance-\" + self.vnf_config[\"random_string\"]) vnf_config[\"vnf_generic_type\"] = ( self.vnf_config[\"vnf_name\"] + \"/\"", "= (self.module_infos[elt][\"module_instance\"] [\"requestReferences\"][\"instanceId\"]) self.clean_module(elt) if not self.components[\"aai\"].check_module_cleaned(vnf_id, module_id): return False", "vnf_config[\"vnf_customization_name\"]) self.vnf_config[elt] = vnf_config def set_module_var(self): \"\"\" set module variables", "= onap_utils.get_config(vnf_param) vf_config[\"module_invariant_id\"] = onap_utils.get_template_param( self.vnf_config[\"vnf\"], \"topology_template.groups.\" + vnf_type +", "by convention is VNF is not precised we set mrf", "check_vnf} def clean(self): \"\"\" Clean VNF from ONAP Args: instance_id:", "of the VNF * module_id: the VF module id of", "* instance_id: The service instance of the VNF \"\"\" self.__logger.info(\"", "Create the service instance (SO) * Create the VNF instance", "self.vnf_config[\"vnf\"], self.components[\"so\"].get_request_info( self.vnf_config['sdnc_vnf_name']), self.components[\"so\"].get_service_model_info( self.vnf_config['invariant_uuid'], self.vnf_config['uuid'])) self.components[\"so\"].delete_instance(instance_id, service_payload) def clean_vnf(self,", "self.components[\"so\"].get_service_payload( self.vnf_config[\"vnf\"], self.components[\"so\"].get_request_info( self.vnf_config['sdnc_vnf_name']), self.components[\"so\"].get_service_model_info( self.vnf_config['invariant_uuid'], self.vnf_config['uuid'])) self.components[\"so\"].delete_instance(instance_id, service_payload) def", "The ID of the VF module instance \"\"\" instance_id =", "\"\"\" Clean VNF SDNC preload \"\"\" self.__logger.info(\" Clean Preload of", "SO * with NBI \"\"\" instance_id = None model_info =", "= { \"generic-vnf-name\": vnf_name, \"generic-vnf-type\": ( self.vnf_config[elt]['vnf_generic_type']), \"service-type\": self.service_infos[\"instance_id\"], \"vnf-name\":", "a VNF It is assumed that the Design phase has", "payload %s\", module_payload) module_instance = self.components[\"so\"].create_module( self.service_infos[\"instance_id\"], self.vnf_infos[elt][\"vnf_id\"], module_payload) self.__logger.info(\">>>>", "to automate the instantiation of a VNF It is assumed", "= onap_utils.get_config(\"general.proxy\") class Solution(object): \"\"\" VNF: Class to automate the", "self.vnf_config[\"random_string\"] = ( onap_utils.random_string_generator()) self.vnf_config[\"sdnc_vnf_name\"] = ( onap_utils.get_config(\"onap.service.name\") + \"_\"", "the instantiation of the 
VNF\") instance_info = self.create_service_instance() service_ok =", "= list(onap_utils.get_template_param( self.vnf_config[\"vnf\"], \"topology_template.groups\"))[i] vnf_config[\"vnf_generic_name\"] = ( self.vnf_config[\"vnf_name\"] + \"-service-instance-\"", "module id of the VNF \"\"\" self.__logger.info(\" Clean Module VF", "\"\"\" set module variables from the config file \"\"\" for", "instance_id): \"\"\" Clean VNF instance Args: * instance_id: The service", "= self.components[\"so\"].get_service_model_info( self.vnf_config['invariant_uuid'], self.vnf_config['uuid']) if self.vnf_config[\"nbi\"]: self.__logger.info(\"1) Create Service instance", "module instance Args: * instance_info: dict including the instance_id, the", "Args: * elt: the VNF \"\"\" vnf_id = None self.__logger.info(\"2)", "self.components[\"so\"].get_request_info( self.vnf_config[\"vnf\"] + \"-service-instance-\" + self.vnf_config['random_string']) service_payload = self.components[\"so\"].get_service_payload( self.vnf_config[\"vnf\"],", "disable=broad-except self.__logger.error(\"Impossible to find the stack %s in OpenStack\", stack_name)", "accompanying materials # are made available under the terms of", "VF Instance %s \", elt) instance_id = self.service_infos[\"instance_id\"] vnf_id =", "+ vnf_type + \".metadata.vfModuleModelCustomizationUUID\")) vf_config[\"module_version_id\"] = onap_utils.get_template_param( self.vnf_config[\"vnf\"], \"topology_template.groups.\" +", "vf_config[\"module_version_id\"] = onap_utils.get_template_param( self.vnf_config[\"vnf\"], \"topology_template.groups.\" + vnf_type + \".metadata.vfModuleModelUUID\") self.vnf_config[elt].update(vf_config)", "not in kwargs: # by convention is VNF is not", "= list(onap_utils.get_template_param( self.vnf_config[\"vnf\"], \"topology_template.node_templates\")) vf_module_list = list(onap_utils.get_template_param( self.vnf_config[\"vnf\"], \"topology_template.groups\")) #", "module_model_info, self.vnf_infos[elt][\"vnf_related_instance\"], module_related_instance) self.__logger.debug(\"Module payload %s\", module_payload) module_instance = self.components[\"so\"].create_module(", "vnf_config[\"vnf_invariant_id\"] = onap_utils.get_template_param( self.vnf_config[\"vnf\"], \"topology_template.node_templates.\" + vnf_config[\"vnf_customization_name\"] + \".metadata.invariantUUID\") vnf_config[\"vnf_version_id\"]", "successfully checked\") return {\"status\": module_ok, \"instance_id\": instance_info, \"vnf_info\": vnf_info, \"module_info\":", "VNF %s in SDNC\", elt) self.__logger.info(\"*******************************\") vnf_name = (self.vnf_config[\"vnf\"] +", "for instance, vnf and module VF self.service_infos = {} self.vnf_infos", "module_info return module_info def check_vnf(self, stack_name): \"\"\" Check VNF stack", "VNF * vnf_id:The VNF id of the VNF \"\"\" self.__logger.info(\"", "\".metadata.customizationUUID\")) vnf_config[\"vnf_type\"] = list(onap_utils.get_template_param( self.vnf_config[\"vnf\"], \"topology_template.groups\"))[i] vnf_config[\"vnf_generic_name\"] = ( self.vnf_config[\"vnf_name\"]", "file \"\"\" self.vnf_config[\"vnf_name\"] = onap_utils.get_template_param( self.vnf_config[\"vnf\"], \"metadata.name\") self.vnf_config[\"invariant_uuid\"] = onap_utils.get_template_param(", "stack has been properly started \"\"\" check_vnf = False try:", "program and the accompanying materials # are made available under", "sdnc import onap_tests.components.nbi as nbi import onap_tests.utils.stack_checker as sc import", "module instance \"\"\" 
instance_id = self.service_infos['instance_id'] for elt in self.vnf_infos['list']:", "self.__logger.info(\"Service instance created: %s\", service_instance_info) self.service_infos = service_instance_info return service_instance_info", "\"\"\" self.vnf_config[\"vnf_name\"] = onap_utils.get_template_param( self.vnf_config[\"vnf\"], \"metadata.name\") self.vnf_config[\"invariant_uuid\"] = onap_utils.get_template_param( self.vnf_config[\"vnf\"],", "id \"\"\" self.__logger.info(\" Clean Preload \") for elt in self.vnf_infos['list']:", "vnf_type + \".metadata.vfModuleModelCustomizationUUID\")) vf_config[\"module_version_id\"] = onap_utils.get_template_param( self.vnf_config[\"vnf\"], \"topology_template.groups.\" + vnf_type", "DB\") else: return False time.sleep(10) self.clean_preload(elt) return True def create_service_instance(self):", "Create VNF instance in SO\") self.__logger.info(\"****************************\") model_info = self.components[\"so\"].get_vnf_model_info( self.vnf_config[elt]['vnf_invariant_id'],", "{\"instance_id\": \"\"} vnf_info = {\"vnf_id\": \"\"} module_info = {} module_ref", "= logging.getLogger(__name__) def __init__(self, **kwargs): \"\"\"Initialize Solution object.\"\"\" super(Solution, self).__init__()", "we cannot be sure that the modules are in teh", "# This program and the accompanying materials # are made", "request_info, \"service_payload\": service_payload} self.__logger.info(\"Service instance created: %s\", service_instance_info) self.service_infos =", "vnf_type + \".metadata.vfModuleModelInvariantUUID\") vf_config[\"module_name_version_id\"] = ( onap_utils.get_template_param( self.vnf_config[\"vnf\"], \"topology_template.groups.\" +", "check_vnf(self, stack_name): \"\"\" Check VNF stack has been properly started", "in teh same order # than the vnf vf_index =", "= self.components[\"aai\"].check_service_instance( self.vnf_config[\"vnf_name\"], instance_info[\"instance_id\"]) if service_ok: # create VNF instance(s)", "self.vnf_config[\"random_string\"]) vnf_list = list(onap_utils.get_template_param( self.vnf_config[\"vnf\"], \"topology_template.node_templates\")) vf_module_list = list(onap_utils.get_template_param( self.vnf_config[\"vnf\"],", "instance, vnf and module VF self.service_infos = {} self.vnf_infos =", "service_instance_info = {\"instance_id\": instance_id, \"request_info\": request_info, \"service_payload\": service_payload} self.__logger.info(\"Service instance", "and the service payload * vnf_info: dict including the vnf_id,", "in SO\", elt) self.__logger.info(\"***************************************\") module_model_info = self.components[\"so\"].get_module_model_info( self.vnf_config[elt]['module_invariant_id'], self.vnf_config[elt]['module_name_version_id'], self.vnf_config[elt]['sdnc_vnf_type'],", "# # pylint: disable=missing-docstring # pylint: disable=duplicate-code import logging import", "for elt in self.vnf_infos['list']: module_info = self.create_module_instance(elt) module_ok = True", "of the VNF instance module_id: The ID of the VF", "vnf_config[\"vnf_customization_name\"] + \".metadata.name\") vnf_config[\"vnf_invariant_id\"] = onap_utils.get_template_param( self.vnf_config[\"vnf\"], \"topology_template.node_templates.\" + vnf_config[\"vnf_customization_name\"]", "self.clean_vnf(elt) if not self.components[\"aai\"].check_vnf_cleaned(vnf_id): return False else: self.clean_instance(instance_id) if self.components[\"aai\"].check_service_instance_cleaned(", "Preload VNF in SDNC Args: * elt: the VNF \"\"\"", "the expected preload clean 
is FAIL we return False clean_preload", "phase \"\"\" __logger = logging.getLogger(__name__) def __init__(self, **kwargs): \"\"\"Initialize Solution", "\".metadata.UUID\") vnf_config[\"vnf_customization_id\"] = ( onap_utils.get_template_param( self.vnf_config[\"vnf\"], \"topology_template.node_templates.\" + vnf_config[\"vnf_customization_name\"] +", "the config file \"\"\" self.vnf_config[\"vnf_name\"] = onap_utils.get_template_param( self.vnf_config[\"vnf\"], \"metadata.name\") self.vnf_config[\"invariant_uuid\"]", "of the VNF \"\"\" self.__logger.info(\" Clean Module VF Instance %s", "+ self.vnf_config[\"random_string\"]) vnf_config[\"vnf_generic_type\"] = ( self.vnf_config[\"vnf_name\"] + \"/\" + vnf_config[\"vnf_customization_name\"])", "Clean vnf Instance %s \", elt) self.components[\"so\"].delete_vnf( self.service_infos[\"instance_id\"], self.vnf_infos[elt][\"vnf_id\"], self.vnf_infos[elt][\"vnf_payload\"])", "instance_id = self.components[\"so\"].create_instance( service_payload) service_instance_info = {\"instance_id\": instance_id, \"request_info\": request_info,", "is not precised we set mrf kwargs[\"case\"] = \"mrf\" self.vnf_config[\"vnf\"]", "module_model_info = self.components[\"so\"].get_module_model_info( self.vnf_config[elt]['module_invariant_id'], self.vnf_config[elt]['module_name_version_id'], self.vnf_config[elt]['sdnc_vnf_type'], self.vnf_config[elt]['module_customization_id'], self.vnf_config[elt]['module_version_id']) module_related_instance =", "+ vnf_type + \".metadata.vfModuleModelInvariantUUID\") vf_config[\"module_name_version_id\"] = ( onap_utils.get_template_param( self.vnf_config[\"vnf\"], \"topology_template.groups.\"", "# # This program and the accompanying materials # are", "return clean_preload def get_info(self): \"\"\" Get VNFs Info \"\"\" self.__logger.info(\"Class", "= self.vnf_infos[elt][\"vnf_id\"] module_id = (self.module_infos[elt][\"module_instance\"] [\"requestReferences\"][\"instanceId\"]) module_payload = self.module_infos[elt][\"module_payload\"] self.components[\"so\"].delete_module(", "+ vnf_config[\"vnf_customization_name\"] + \".metadata.invariantUUID\") vnf_config[\"vnf_version_id\"] = onap_utils.get_template_param( self.vnf_config[\"vnf\"], \"topology_template.node_templates.\" +", "VNF\") instance_info = self.create_service_instance() service_ok = self.components[\"aai\"].check_service_instance( self.vnf_config[\"vnf_name\"], instance_info[\"instance_id\"]) if", "= onap_utils.get_template_param( self.vnf_config[\"vnf\"], \"topology_template.node_templates.\" + vnf_config[\"vnf_customization_name\"] + \".metadata.invariantUUID\") vnf_config[\"vnf_version_id\"] =", "self.__logger.info(\"********************************\") request_info = self.components[\"so\"].get_request_info( self.vnf_config[\"vnf\"] + \"-service-instance-\" + self.vnf_config['random_string']) service_payload", "False break else: # check VNF using OpenStack directly check_vnf", "(self.vnf_config[\"vnf\"] + \"-vnf-instance-\" + str(elt).replace(\" \", \"_\") + (\"_\") +", "( self.components[\"so\"].get_module_related_instance( self.vnf_infos[elt][\"vnf_id\"], self.vnf_config[elt]['vnf_invariant_id'], self.vnf_config[elt]['vnf_version_id'], self.vnf_config[elt]['vnf_model_name'], self.vnf_config[elt]['vnf_customization_id'], self.vnf_config[elt]['vnf_customization_name'])) module_instance_name =", "clean_all_preload(self): \"\"\" Clean VNF SDNC preload with the preload id", "self.vnf_config[elt]['vnf_version_id'], self.vnf_config[elt]['vnf_model_name'], 
self.vnf_config[elt]['vnf_customization_id'], self.vnf_config[elt]['vnf_customization_name'])) module_instance_name = (self.vnf_config[\"vnf\"] + \"-vfmodule-instance-\" +", "= self.components[\"nbi\"].get_request_info() service_payload = ( self.components[\"nbi\"].get_nbi_service_order_payload()) nbi_info = self.components[\"nbi\"].create_service_order_nbi( service_payload)", "module_id): return False else: self.clean_vnf(elt) if not self.components[\"aai\"].check_vnf_cleaned(vnf_id): return False", "self.__logger.info(\"SDNC preload payload %s\", sdnc_payload) sdnc_preload = self.components[\"sdnc\"].preload(sdnc_payload) self.__logger.debug(\"SDNC preload", "VNF stack has been properly started \"\"\" check_vnf = False", "self.vnf_config[\"vnf\"], \"topology_template.groups.\" + vnf_type + \".metadata.vfModuleModelInvariantUUID\") vf_config[\"module_name_version_id\"] = ( onap_utils.get_template_param(", "self.__logger.info(\"Check vnf %s ....\", elt) vnf_ok = True self.__logger.info(\"Check vnf", "str(elt).replace(\" \", \"_\") + (\"_\") + self.vnf_config['random_string']) request_info = self.components[\"so\"].get_request_info(", "instance \"\"\" instance_id = self.service_infos['instance_id'] for elt in self.vnf_infos['list']: vnf_id", "Clean Service Instance \") service_payload = self.components[\"so\"].get_service_payload( self.vnf_config[\"vnf\"], self.components[\"so\"].get_request_info( self.vnf_config['sdnc_vnf_name']),", "in SO\") self.__logger.info(\"****************************\") model_info = self.components[\"so\"].get_vnf_model_info( self.vnf_config[elt]['vnf_invariant_id'], self.vnf_config[elt]['vnf_version_id'], self.vnf_config[elt]['vnf_model_name'], self.vnf_config[elt]['vnf_customization_id'],", "It is assumed that the Design phase has been already", "self.vnf_config[\"nbi\"] = kwargs[\"nbi\"] # can be useful to destroy resources,", "( self.vnf_config[\"vnf_name\"] + \"/\" + vnf_config[\"vnf_customization_name\"]) self.vnf_config[elt] = vnf_config def", "self.vnf_config['uuid']) vnf_instance_name = (self.vnf_config[\"vnf\"] + \"-vnf-instance-\" + str(elt).replace(\" \", \"_\")", "VNF \"\"\" self.__logger.info(\" Clean vnf Instance %s \", elt) self.components[\"so\"].delete_vnf(", "self.components[\"so\"].create_instance( service_payload) service_instance_info = {\"instance_id\": instance_id, \"request_info\": request_info, \"service_payload\": service_payload}", "vnf_info[\"vnf_id\"], module_ref[\"requestReferences\"][\"instanceId\"]): module_ok = False break else: # check VNF", "Design phase has been already done The yaml template is", "vnf_id = None self.__logger.info(\"2) Create VNF instance in SO\") self.__logger.info(\"****************************\")", "the accompanying materials # are made available under the terms", "yaml template is available and stored in the template directory", "= list(onap_utils.get_template_param( self.vnf_config[\"vnf\"], \"topology_template.groups\")) # Class attributes for instance, vnf", "\"service_payload\": service_payload} self.__logger.info(\"Service instance created: %s\", service_instance_info) self.service_infos = service_instance_info", "+ \".metadata.name\") vnf_config[\"vnf_invariant_id\"] = onap_utils.get_template_param( self.vnf_config[\"vnf\"], \"topology_template.node_templates.\" + vnf_config[\"vnf_customization_name\"] +", "{'list': vnf_list} self.module_infos = {'list': vf_module_list} # retrieve infos from", "+ \"-vnf-instance-\" + str(elt).replace(\" \", \"_\") + (\"_\") + self.vnf_config['random_string'])", 
"\".metadata.vfModuleModelUUID\")) vf_config[\"module_customization_id\"] = ( onap_utils.get_template_param( self.vnf_config[\"vnf\"], \"topology_template.groups.\" + vnf_type +", "\"\"\" self.__logger.info(\" Clean Module VF Instance %s \", elt) instance_id", "infos from the configuration files self.set_service_instance_var() self.set_vnf_var() self.set_module_var() self.set_onap_components() def", "onap_utils.get_template_param( self.vnf_config[\"vnf\"], \"topology_template.node_templates.\" + vnf_config[\"vnf_customization_name\"] + \".metadata.invariantUUID\") vnf_config[\"vnf_version_id\"] = onap_utils.get_template_param(", "= sc.StackChecker() if my_stack_checker.check_stack_is_complete(stack_name): check_vnf = True except Exception: #", "vnf_preload_infos[elt] = ({\"sdnc_payload\": sdnc_payload, \"sdnc_preload\": sdnc_preload}) return vnf_preload_infos[elt] def create_module_instance(self,", "if vnf_ok: # create VF module(s) for elt in self.vnf_infos['list']:", "Random part = 6 last char of the the vnf", "the VNF \"\"\" self.__logger.info(\" Clean vnf Instance %s \", elt)", "list(onap_utils.get_template_param( self.vnf_config[\"vnf\"], \"topology_template.node_templates\")) vf_module_list = list(onap_utils.get_template_param( self.vnf_config[\"vnf\"], \"topology_template.groups\")) # Class", "# we cannot be sure that the modules are in", "= onap_utils.get_template_param( self.vnf_config[\"vnf\"], \"metadata.name\") self.vnf_config[\"invariant_uuid\"] = onap_utils.get_template_param( self.vnf_config[\"vnf\"], \"metadata.invariantUUID\") self.vnf_config[\"uuid\"]", "from the config file \"\"\" self.vnf_config[\"vnf_name\"] = onap_utils.get_template_param( self.vnf_config[\"vnf\"], \"metadata.name\")", "clean_preload(self, elt): \"\"\" Clean VNF SDNC preload \"\"\" self.__logger.info(\" Clean", "%s ....\", elt) vnf_ok = True self.__logger.info(\"Check vnf %s ....\",", "self.vnf_config[elt]['module_customization_id'], self.vnf_config[elt]['module_version_id']) module_related_instance = ( self.components[\"so\"].get_module_related_instance( self.vnf_infos[elt][\"vnf_id\"], self.vnf_config[elt]['vnf_invariant_id'], self.vnf_config[elt]['vnf_version_id'], self.vnf_config[elt]['vnf_model_name'],", "service_ok = self.components[\"aai\"].check_service_instance( self.vnf_config[\"vnf_name\"], instance_info[\"instance_id\"]) if service_ok: # create VNF", "teh same order # than the vnf vf_index = onap_utils.get_vf_module_index(", "create_module_instance(self, elt): \"\"\" Create module instance Args: * instance_info: dict", "%s\", module_instance) module_info = ( {'module_instance': module_instance, 'module_instance_name': module_instance_name, 'module_payload':", "file \"\"\" for elt in self.vnf_infos['list']: vf_config = {} #", "payload %s\", sdnc_payload) sdnc_preload = self.components[\"sdnc\"].preload(sdnc_payload) self.__logger.debug(\"SDNC preload answer: %s\",", "* instance_id: The service instance id of the VNF *", "self.components[\"aai\"].check_service_instance_cleaned( self.vnf_config[\"vnf_name\"], instance_id): self.__logger.debug(\"Instance still in AAI DB\") else: return", "self.__logger.debug(\"Module payload %s\", module_payload) module_instance = self.components[\"so\"].create_module( self.service_infos[\"instance_id\"], self.vnf_infos[elt][\"vnf_id\"], module_payload)", "+ \".metadata.vfModuleModelName\") vnf_param = (self.vnf_config[\"vnf\"] + \".\" + str(elt) +", "sdnc.Sdnc(PROXY, self.__logger) self.components[\"nbi\"] = nbi.Nbi(PROXY, self.__logger) def instantiate(self): 
\"\"\" Instantiate", "= False try: my_stack_checker = sc.StackChecker() if my_stack_checker.check_stack_is_complete(stack_name): check_vnf =", "clean_instance(self, instance_id): \"\"\" Clean VNF instance Args: * instance_id: The", "Create the VNF instance (SO) * preload the VNF in", "\"\"\" self.__logger.info(\" Clean Service Instance \") service_payload = self.components[\"so\"].get_service_payload( self.vnf_config[\"vnf\"],", "in self.vnf_infos['list']: vnf_info = self.create_vnf_instance(elt) self.__logger.info(\"Check vnf %s ....\", elt)", "self.vnf_config[\"vnf\"], \"topology_template.groups.\" + vnf_type + \".metadata.vfModuleModelUUID\")) vf_config[\"module_customization_id\"] = ( onap_utils.get_template_param(", "Args: instance_id: The ID of the VNF service instance vnf_id:", "def check_vnf(self, stack_name): \"\"\" Check VNF stack has been properly", "model_info) instance_id = self.components[\"so\"].create_instance( service_payload) service_instance_info = {\"instance_id\": instance_id, \"request_info\":", "+ \"_\" + self.vnf_config['random_string']) vnf_topology_identifier = { \"generic-vnf-name\": vnf_name, \"generic-vnf-type\":", "%s\", module_info) self.module_infos[elt] = module_info return module_info def check_vnf(self, stack_name):", "template is available and stored in the template directory TODO:", "import onap_tests.components.sdnc as sdnc import onap_tests.components.nbi as nbi import onap_tests.utils.stack_checker", "self.vnf_infos[elt][\"vnf_id\"], self.vnf_config[elt]['vnf_invariant_id'], self.vnf_config[elt]['vnf_version_id'], self.vnf_config[elt]['vnf_model_name'], self.vnf_config[elt]['vnf_customization_id'], self.vnf_config[elt]['vnf_customization_name'])) module_instance_name = (self.vnf_config[\"vnf\"] +", "cannot be sure that the modules are in teh same", "ID of the VNF instance module_id: The ID of the", "= ( onap_utils.random_string_generator()) self.vnf_config[\"sdnc_vnf_name\"] = ( onap_utils.get_config(\"onap.service.name\") + \"_\" +", "self.components[\"aai\"].check_module_cleaned(vnf_id, module_id): return False else: self.clean_vnf(elt) if not self.components[\"aai\"].check_vnf_cleaned(vnf_id): return", "module_info = self.create_module_instance(elt) module_ok = True module_ref = module_info['module_instance'] if", "clean_preload def get_info(self): \"\"\" Get VNFs Info \"\"\" self.__logger.info(\"Class to", "template directory TODO: automate the design phase \"\"\" __logger =", "\"-service-instance-\" + self.vnf_config['random_string']) service_payload = self.components[\"so\"].get_service_payload( self.vnf_config[\"vnf\"], request_info, model_info) instance_id", "as sdnc import onap_tests.components.nbi as nbi import onap_tests.utils.stack_checker as sc", "vnf_related_instance) # self.__logger.debug(\"VNF payload: %s\", vnf_payload) vnf_id = self.components[\"so\"].create_vnf( self.service_infos[\"instance_id\"],", "self.service_infos = {} self.vnf_infos = {'list': vnf_list} self.module_infos = {'list':", "vnf_instance_name = (self.vnf_config[\"vnf\"] + \"-vnf-instance-\" + str(elt).replace(\" \", \"_\") +", "vnf_param = (self.vnf_config[\"vnf\"] + \".\" + str(elt) + \".vnf_parameters\") vf_config[\"vnf_parameters\"]", "self.vnf_config[elt]['vnf_version_id'], self.vnf_config[elt]['vnf_model_name'], self.vnf_config[elt]['vnf_customization_id'], self.vnf_config[elt]['vnf_customization_name']) vnf_related_instance = self.components[\"so\"].get_vnf_related_instance( self.service_infos[\"instance_id\"], self.vnf_config['invariant_uuid'], 
self.vnf_config['uuid'])", "the config file \"\"\" for i, elt in enumerate(self.vnf_infos['list']): vnf_config", "destroy resources, sdnc module name shall be given if \"sdnc_vnf_name\"", "= self.components[\"so\"].get_vnf_model_info( self.vnf_config[elt]['vnf_invariant_id'], self.vnf_config[elt]['vnf_version_id'], self.vnf_config[elt]['vnf_model_name'], self.vnf_config[elt]['vnf_customization_id'], self.vnf_config[elt]['vnf_customization_name']) vnf_related_instance = self.components[\"so\"].get_vnf_related_instance(", "onap_utils.random_string_generator()) self.vnf_config[\"sdnc_vnf_name\"] = ( onap_utils.get_config(\"onap.service.name\") + \"_\" + kwargs[\"case\"] +", "self.components[\"sdnc\"].delete_preload( self.module_infos[elt][\"module_instance_name\"], self.vnf_config[elt]['sdnc_vnf_type']) return clean_preload def get_info(self): \"\"\" Get VNFs", "{} self.components = {} if \"case\" not in kwargs: #", "\"topology_template.node_templates.\" + vnf_config[\"vnf_customization_name\"] + \".metadata.name\") vnf_config[\"vnf_invariant_id\"] = onap_utils.get_template_param( self.vnf_config[\"vnf\"], \"topology_template.node_templates.\"", "self.__logger.info(\" Clean Preload \") for elt in self.vnf_infos['list']: clean_preload =", "def clean(self): \"\"\" Clean VNF from ONAP Args: instance_id: The", "vnf_id: The ID of the VNF instance module_id: The ID", "the VF module instance (SO) \"\"\" instance_info = {\"instance_id\": \"\"}", "module_instance_name) module_payload = self.components[\"so\"].get_module_payload( self.vnf_config[\"vnf\"], request_info, module_model_info, self.vnf_infos[elt][\"vnf_related_instance\"], module_related_instance) self.__logger.debug(\"Module", "as nbi import onap_tests.utils.stack_checker as sc import onap_tests.utils.utils as onap_utils", "else: # preload VNF(s) in SDNC self.preload(elt) time.sleep(10) if vnf_ok:", "vnf_instance_name, \"vnf_payload\": vnf_payload, \"vnf_related_instance\": vnf_related_instance} self.__logger.info(\">>>> SO vnf instance created", "return check_vnf def clean_instance(self, instance_id): \"\"\" Clean VNF instance Args:", "return {\"status\": module_ok, \"instance_id\": instance_info, \"vnf_info\": vnf_info, \"module_info\": module_info, \"check_heat\":", "object.\"\"\" super(Solution, self).__init__() self.vnf_config = {} self.components = {} if", "vnf_config[\"vnf_generic_type\"] = ( self.vnf_config[\"vnf_name\"] + \"/\" + vnf_config[\"vnf_customization_name\"]) self.vnf_config[elt] =", "self.vnf_config[\"invariant_uuid\"] = onap_utils.get_template_param( self.vnf_config[\"vnf\"], \"metadata.invariantUUID\") self.vnf_config[\"uuid\"] = onap_utils.get_template_param( self.vnf_config[\"vnf\"], \"metadata.UUID\")", "\"-vfmodule-instance-\" + str(elt).replace(\" \", \"_\") + \"_\" + self.vnf_config['random_string']) request_info", "module vf(s) created: %s\", module_info) self.module_infos[elt] = module_info return module_info", "= {} self.components = {} if \"case\" not in kwargs:", "elt in enumerate(self.vnf_infos['list']): vnf_config = {} self.__logger.info(\"get VNF %s info\",", "in SDNC self.preload(elt) time.sleep(10) if vnf_ok: # create VF module(s)", "\"sdnc_vnf_name\" in kwargs: self.vnf_config[\"sdnc_vnf_name\"] = kwargs[\"sdnc_vnf_name\"] # Random part =", "\"\"\" vnf_id = None self.__logger.info(\"2) Create VNF instance in SO\")", "vf_module_list = list(onap_utils.get_template_param( self.vnf_config[\"vnf\"], \"topology_template.groups\")) # Class attributes for instance,", "self.components[\"so\"].delete_vnf( 
self.service_infos[\"instance_id\"], self.vnf_infos[elt][\"vnf_id\"], self.vnf_infos[elt][\"vnf_payload\"]) def clean_module(self, elt): \"\"\" Clean VNF", "logging.getLogger(__name__) def __init__(self, **kwargs): \"\"\"Initialize Solution object.\"\"\" super(Solution, self).__init__() self.vnf_config", "str(elt).replace(\" \", \"_\") + \"_\" + self.vnf_config['random_string']) vnf_topology_identifier = {", "return vnf_info def preload(self, elt): \"\"\" Preload VNF in SDNC", "sdnc_payload = self.components[\"sdnc\"].get_preload_payload( self.vnf_config[elt]['vnf_parameters'], vnf_topology_identifier) self.__logger.info(\"SDNC preload payload %s\", sdnc_payload)", "import logging import time import onap_tests.components.aai as aai import onap_tests.components.so", "{} # we cannot be sure that the modules are", "self.module_infos[elt] = module_info return module_info def check_vnf(self, stack_name): \"\"\" Check", "vnf_config def set_module_var(self): \"\"\" set module variables from the config", "design phase \"\"\" __logger = logging.getLogger(__name__) def __init__(self, **kwargs): \"\"\"Initialize", "kwargs: self.vnf_config[\"sdnc_vnf_name\"] = kwargs[\"sdnc_vnf_name\"] # Random part = 6 last", "self.components[\"so\"].get_request_info( self.vnf_config['sdnc_vnf_name']), self.components[\"so\"].get_service_model_info( self.vnf_config['invariant_uuid'], self.vnf_config['uuid'])) self.components[\"so\"].delete_instance(instance_id, service_payload) def clean_vnf(self, elt):", "self.__logger.info(\">>>> SO vnf instance created %s\", vnf_info) self.vnf_infos[elt] = vnf_info", "True module_ref = module_info['module_instance'] if not self.components[\"aai\"].check_module_instance( vnf_info[\"vnf_id\"], module_ref[\"requestReferences\"][\"instanceId\"]): module_ok", "the Apache License, Version 2.0 # which accompanies this distribution,", "= self.check_vnf( self.module_infos[elt][\"module_instance_name\"]) if check_vnf: self.__logger.info(\"Stack successfully checked\") return {\"status\":", "{} module_ref = {\"instanceId\": \"\"} module_ok = False check_vnf =", "module_info) self.module_infos[elt] = module_info return module_info def check_vnf(self, stack_name): \"\"\"", "including the instance_id, the request_info and the service payload *", "# preload VNF(s) in SDNC self.preload(elt) time.sleep(10) if vnf_ok: #", "Instance \") service_payload = self.components[\"so\"].get_service_payload( self.vnf_config[\"vnf\"], self.components[\"so\"].get_request_info( self.vnf_config['sdnc_vnf_name']), self.components[\"so\"].get_service_model_info( self.vnf_config['invariant_uuid'],", "the VF module id of the VNF \"\"\" self.__logger.info(\" Clean", "(SO) \"\"\" instance_info = {\"instance_id\": \"\"} vnf_info = {\"vnf_id\": \"\"}", "to destroy resources, sdnc module name shall be given if", "service_payload = ( self.components[\"nbi\"].get_nbi_service_order_payload()) nbi_info = self.components[\"nbi\"].create_service_order_nbi( service_payload) time.sleep(5) instance_id", "\"generic-vnf-type\": ( self.vnf_config[elt]['vnf_generic_type']), \"service-type\": self.service_infos[\"instance_id\"], \"vnf-name\": vnf_name, \"vnf-type\": self.vnf_config[elt]['sdnc_vnf_type']} sdnc_payload", "vf_config[\"module_customization_id\"] = ( onap_utils.get_template_param( self.vnf_config[\"vnf\"], \"topology_template.groups.\" + vnf_type + \".metadata.vfModuleModelCustomizationUUID\"))", "to create the instance * with SO * with NBI", "VNF(s) in SDNC self.preload(elt) time.sleep(10) if vnf_ok: # create VF", "if 
\"sdnc_vnf_name\" in kwargs: self.vnf_config[\"sdnc_vnf_name\"] = kwargs[\"sdnc_vnf_name\"] # Random part", "terms of the Apache License, Version 2.0 # which accompanies", "self.components[\"nbi\"].create_service_order_nbi( service_payload) time.sleep(5) instance_id = ( self.components[\"nbi\"].get_service_instance_id_from_order( nbi_info[\"id\"])) else: self.__logger.info(\"1)", "instance of the VNF * vnf_id:The VNF id of the", "= onap_utils.get_template_param( self.vnf_config[\"vnf\"], \"topology_template.groups.\" + vnf_type + \".metadata.vfModuleModelName\") vnf_param =", "%s \", elt) self.components[\"so\"].delete_vnf( self.service_infos[\"instance_id\"], self.vnf_infos[elt][\"vnf_id\"], self.vnf_infos[elt][\"vnf_payload\"]) def clean_module(self, elt):", "same order # than the vnf vf_index = onap_utils.get_vf_module_index( self.module_infos['list'],", "= self.components[\"sdnc\"].preload(sdnc_payload) self.__logger.debug(\"SDNC preload answer: %s\", sdnc_preload) vnf_preload_infos[elt] = ({\"sdnc_payload\":", "self.vnf_config[\"vnf\"], \"topology_template.groups.\" + vnf_type + \".metadata.vfModuleModelName\") vnf_param = (self.vnf_config[\"vnf\"] +", "SO\") self.__logger.info(\"********************************\") request_info = self.components[\"so\"].get_request_info( self.vnf_config[\"vnf\"] + \"-service-instance-\" + self.vnf_config['random_string'])", "VF module instance \"\"\" instance_id = self.service_infos['instance_id'] for elt in", "VNF instance in SO\") self.__logger.info(\"****************************\") model_info = self.components[\"so\"].get_vnf_model_info( self.vnf_config[elt]['vnf_invariant_id'], self.vnf_config[elt]['vnf_version_id'],", "\"topology_template.node_templates\")) vf_module_list = list(onap_utils.get_template_param( self.vnf_config[\"vnf\"], \"topology_template.groups\")) # Class attributes for", "SDNC\", elt) self.__logger.info(\"*******************************\") vnf_name = (self.vnf_config[\"vnf\"] + \"-vfmodule-instance-\" + str(elt).replace(\"", "is FAIL we return False clean_preload = self.components[\"sdnc\"].delete_preload( self.module_infos[elt][\"module_instance_name\"], self.vnf_config[elt][\"sdnc_vnf_type\"])", "of the VF module instance \"\"\" instance_id = self.service_infos['instance_id'] for", "clean_vnf(self, elt): \"\"\" Clean VNF Args: * instance_id: The service", "\".metadata.vfModuleModelCustomizationUUID\")) vf_config[\"module_version_id\"] = onap_utils.get_template_param( self.vnf_config[\"vnf\"], \"topology_template.groups.\" + vnf_type + \".metadata.vfModuleModelUUID\")", "self.vnf_config['uuid']) if self.vnf_config[\"nbi\"]: self.__logger.info(\"1) Create Service instance from NBI\") self.__logger.info(\"***********************************\")", "vnf_list} self.module_infos = {'list': vf_module_list} # retrieve infos from the", "= elt vnf_config[\"vnf_model_name\"] = onap_utils.get_template_param( self.vnf_config[\"vnf\"], \"topology_template.node_templates.\" + vnf_config[\"vnf_customization_name\"] +", "self.set_service_instance_var() self.set_vnf_var() self.set_module_var() self.set_onap_components() def set_service_instance_var(self): \"\"\" set service instance", "self.vnf_config['random_string']) vnf_topology_identifier = { \"generic-vnf-name\": vnf_name, \"generic-vnf-type\": ( self.vnf_config[elt]['vnf_generic_type']), \"service-type\":", "VNF Args: * instance_id: The service instance of the VNF", "self.service_infos[\"instance_id\"], \"vnf-name\": vnf_name, \"vnf-type\": self.vnf_config[elt]['sdnc_vnf_type']} 
sdnc_payload = self.components[\"sdnc\"].get_preload_payload( self.vnf_config[elt]['vnf_parameters'], vnf_topology_identifier)", "Create Service instance from NBI\") self.__logger.info(\"***********************************\") request_info = self.components[\"nbi\"].get_request_info() service_payload", "self.vnf_config[\"vnf\"], \"topology_template.node_templates\")) vf_module_list = list(onap_utils.get_template_param( self.vnf_config[\"vnf\"], \"topology_template.groups\")) # Class attributes", "self.vnf_config[\"vnf_name\"] + \"/\" + vnf_config[\"vnf_customization_name\"]) self.vnf_config[elt] = vnf_config def set_module_var(self):", "self.vnf_config[\"vnf\"], \"topology_template.groups.\" + vnf_type + \".metadata.vfModuleModelCustomizationUUID\")) vf_config[\"module_version_id\"] = onap_utils.get_template_param( self.vnf_config[\"vnf\"],", "self.__logger.info(\" Clean Module VF Instance %s \", elt) instance_id =", "service_payload} self.__logger.info(\"Service instance created: %s\", service_instance_info) self.service_infos = service_instance_info return", "automate the design phase \"\"\" __logger = logging.getLogger(__name__) def __init__(self,", "Preload VNF %s in SDNC\", elt) self.__logger.info(\"*******************************\") vnf_name = (self.vnf_config[\"vnf\"]", "module instance (SO) \"\"\" instance_info = {\"instance_id\": \"\"} vnf_info =", "\"vnf_related_instance\": vnf_related_instance} self.__logger.info(\">>>> SO vnf instance created %s\", vnf_info) self.vnf_infos[elt]", "the VNF instance (SO) * preload the VNF in the", "if check_vnf: self.__logger.info(\"Stack successfully checked\") return {\"status\": module_ok, \"instance_id\": instance_info,", "+ \".metadata.vfModuleModelUUID\") self.vnf_config[elt].update(vf_config) def set_onap_components(self): \"\"\" Set ONAP component objects", "\", \"_\") + \"_\" + self.vnf_config['random_string']) vnf_topology_identifier = { \"generic-vnf-name\":", "instance id of the VNF * vnf_id:The VNF id of", "( self.vnf_config[elt]['vnf_generic_type']), \"service-type\": self.service_infos[\"instance_id\"], \"vnf-name\": vnf_name, \"vnf-type\": self.vnf_config[elt]['sdnc_vnf_type']} sdnc_payload =", "dict including the vnf_id, vnf_related_instance and the vnf payload \"\"\"", "= kwargs[\"case\"] if \"nbi\" in kwargs: self.vnf_config[\"nbi\"] = kwargs[\"nbi\"] #", "+ vnf_config[\"vnf_customization_name\"] + \".metadata.name\") vnf_config[\"vnf_invariant_id\"] = onap_utils.get_template_param( self.vnf_config[\"vnf\"], \"topology_template.node_templates.\" +", "\"\"\" Clean VNF instance Args: * instance_id: The service instance", "onap_tests.components.aai as aai import onap_tests.components.so as so import onap_tests.components.sdnc as", "# check VNF using OpenStack directly check_vnf = self.check_vnf( self.module_infos[elt][\"module_instance_name\"])", "self).__init__() self.vnf_config = {} self.components = {} if \"case\" not", "list(onap_utils.get_template_param( self.vnf_config[\"vnf\"], \"topology_template.groups\"))[i] vnf_config[\"vnf_generic_name\"] = ( self.vnf_config[\"vnf_name\"] + \"-service-instance-\" +", "onap_utils.get_config(\"onap.service.name\") + \"_\" + kwargs[\"case\"] + \"_\" + self.vnf_config[\"random_string\"]) vnf_list", "'module_payload': module_payload, 'module_model_info': module_model_info, 'module_related_instance': module_related_instance}) self.__logger.info(\"SO module vf(s) created:", "VNF %s\", elt) vf_config[\"sdnc_vnf_type\"] = onap_utils.get_template_param( self.vnf_config[\"vnf\"], 
\"topology_template.groups.\" + vnf_type", "self.vnf_config[elt] = vnf_config def set_module_var(self): \"\"\" set module variables from", "VNF instance Args: * elt: the VNF \"\"\" vnf_id =", "\"\"\" for i, elt in enumerate(self.vnf_infos['list']): vnf_config = {} self.__logger.info(\"get", "return False else: self.clean_instance(instance_id) if self.components[\"aai\"].check_service_instance_cleaned( self.vnf_config[\"vnf_name\"], instance_id): self.__logger.debug(\"Instance still", "started \"\"\" check_vnf = False try: my_stack_checker = sc.StackChecker() if", "kwargs[\"case\"] if \"nbi\" in kwargs: self.vnf_config[\"nbi\"] = kwargs[\"nbi\"] # can", "vnf_info[\"vnf_id\"]): vnf_ok = False break else: # preload VNF(s) in", "file \"\"\" for i, elt in enumerate(self.vnf_infos['list']): vnf_config = {}", "\", \"_\") + \"_\" + self.vnf_config['random_string']) request_info = self.components[\"so\"].get_request_info( module_instance_name)" ]
[ "o = random.randint( 0, 1000 ) self._control0.add_event( s, s+o, track=random.randint(0,self.N_TRACKS)", "__license__ = \"MIT\" __version__ = \"0.0\" __maintainer__ = \"<NAME>\" __email__", "of the forms fields self._control0 = ControlEventsGraph('Check me') self._control1 =", "self._control3.value = e def __btn1(self): self._start = time.time() timer =", "= self._control0.value e = b+self.INTERVAL self._control0.add_event( b, e, track=random.randint(0,self.N_TRACKS) )", "self._control3.value e = b+self.INTERVAL self._control3.add_event( b, e, track=random.randint(0,self.N_TRACKS) ) self._control3.value", "timer.start(self.INTERVAL) timer = QtCore.QTimer(self.form) timer.timeout.connect(self.__addEvent1) timer.start(self.INTERVAL) timer = QtCore.QTimer(self.form) timer.timeout.connect(self.__addEvent2)", "= self._control2.value e = b+self.INTERVAL self._control2.add_event( b, e, track=random.randint(0,self.N_TRACKS) )", "#Execute the application if __name__ == \"__main__\": pyforms.start_app( SimpleExample )", "e def __addEvent2(self): b = self._control2.value e = b+self.INTERVAL self._control2.add_event(", "8 def __btn(self): for i in range(40): s = random.randint(", "random.randint( 0, 10000 ) o = random.randint( 0, 1000 )", "utf-8 -*- __author__ = \"<NAME>\" __credits__ = [\"<NAME>\"] __license__ =", "e self._txt.value = str(time.time() - self._start) def __addEvent1(self): b =", "time.time() self.INTERVAL = 500 self.N_TRACKS = 8 def __btn(self): for", "################################################################################################################## ################################################################################################################## ################################################################################################################## #Execute the application if __name__ == \"__main__\":", "timer.start(self.INTERVAL) timer = QtCore.QTimer(self.form) timer.timeout.connect(self.__addEvent3) timer.start(self.INTERVAL) ################################################################################################################## ################################################################################################################## ################################################################################################################## #Execute", "s+o, track=random.randint(0,self.N_TRACKS) ) #self._control0.add_event( random.randint(0, 10000), s+o, track=random.randint(0,self.N_TRACKS), color=\"#00FFDD\") self._control0.value", "e def __btn1(self): self._start = time.time() timer = QtCore.QTimer(self.form) timer.timeout.connect(self.__addEvent0)", "= ControlButton('Load button') self.formset = [ ('_btn','_btn1'), ('_control0','_control1'), ('_control2','_control3'), '_txt',", "= \"<NAME>\" __credits__ = [\"<NAME>\"] __license__ = \"MIT\" __version__ =", "def __init__(self): super(SimpleExample,self).__init__('Simple example') #Definition of the forms fields self._control0", "#Definition of the forms fields self._control0 = ControlEventsGraph('Check me') self._control1", "e = b+self.INTERVAL self._control2.add_event( b, e, track=random.randint(0,self.N_TRACKS) ) self._control2.value =", "= ControlEventsGraph('Check me') self._control3 = ControlEventsGraph('Check me') self._txt = ControlText('Time')", "= \"<EMAIL>\" __status__ = \"Development\" from __init__ import * import", "timer = QtCore.QTimer(self.form) timer.timeout.connect(self.__addEvent0) timer.start(self.INTERVAL) timer = QtCore.QTimer(self.form) 
timer.timeout.connect(self.__addEvent1) timer.start(self.INTERVAL)", "timer = QtCore.QTimer(self.form) timer.timeout.connect(self.__addEvent3) timer.start(self.INTERVAL) ################################################################################################################## ################################################################################################################## ################################################################################################################## #Execute the", "= ControlText('Time') self._btn = ControlButton('Click') self._btn1 = ControlButton('Click 1') self._save", "import random, time from PyQt4 import QtCore class SimpleExample(BaseWidget): def", "from __init__ import * import random, time from PyQt4 import", "in range(40): s = random.randint( 0, 10000 ) o =", "self._txt.value = str(time.time() - self._start) def __addEvent1(self): b = self._control1.value", "self._save.value = self.save_window self._load.value = self.load_window self._start = time.time() self.INTERVAL", "= e def __addEvent2(self): b = self._control2.value e = b+self.INTERVAL", "b, e, track=random.randint(0,self.N_TRACKS) ) self._control1.value = e def __addEvent2(self): b", "0, 1000 ) self._control0.add_event( s, s+o, track=random.randint(0,self.N_TRACKS) ) #self._control0.add_event( random.randint(0,", "= b+self.INTERVAL self._control3.add_event( b, e, track=random.randint(0,self.N_TRACKS) ) self._control3.value = e", "self._control0.add_event( b, e, track=random.randint(0,self.N_TRACKS) ) self._control0.value = e self._txt.value =", "= ControlButton('Click') self._btn1 = ControlButton('Click 1') self._save = ControlButton('Save button')", "b+self.INTERVAL self._control0.add_event( b, e, track=random.randint(0,self.N_TRACKS) ) self._control0.value = e self._txt.value", "self._control3.add_event( b, e, track=random.randint(0,self.N_TRACKS) ) self._control3.value = e def __btn1(self):", "= 5000 def __addEvent0(self): b = self._control0.value e = b+self.INTERVAL", "[ ('_btn','_btn1'), ('_control0','_control1'), ('_control2','_control3'), '_txt', ('_save','_load')] self._btn.value = self.__btn self._btn1.value", "track=random.randint(0,self.N_TRACKS), color=\"#00FFDD\") self._control0.value = 5000 def __addEvent0(self): b = self._control0.value", "= ControlEventsGraph('Check me') self._txt = ControlText('Time') self._btn = ControlButton('Click') self._btn1", "\"<NAME>\" __credits__ = [\"<NAME>\"] __license__ = \"MIT\" __version__ = \"0.0\"", ") self._control2.value = e def __addEvent3(self): b = self._control3.value e", "= ControlButton('Click 1') self._save = ControlButton('Save button') self._load = ControlButton('Load", "self.INTERVAL = 500 self.N_TRACKS = 8 def __btn(self): for i", "('_control0','_control1'), ('_control2','_control3'), '_txt', ('_save','_load')] self._btn.value = self.__btn self._btn1.value = self.__btn1", "\"Development\" from __init__ import * import random, time from PyQt4", "self._txt = ControlText('Time') self._btn = ControlButton('Click') self._btn1 = ControlButton('Click 1')", "import QtCore class SimpleExample(BaseWidget): def __init__(self): super(SimpleExample,self).__init__('Simple example') #Definition of", "ControlButton('Save button') self._load = ControlButton('Load button') self.formset = [ ('_btn','_btn1'),", "e def __addEvent3(self): b = self._control3.value e = b+self.INTERVAL self._control3.add_event(", "self._start = time.time() timer = QtCore.QTimer(self.form) timer.timeout.connect(self.__addEvent0) 
timer.start(self.INTERVAL) timer =", "= random.randint( 0, 10000 ) o = random.randint( 0, 1000", "self._control0.value = e self._txt.value = str(time.time() - self._start) def __addEvent1(self):", "PyQt4 import QtCore class SimpleExample(BaseWidget): def __init__(self): super(SimpleExample,self).__init__('Simple example') #Definition", "def __btn(self): for i in range(40): s = random.randint( 0,", "__addEvent3(self): b = self._control3.value e = b+self.INTERVAL self._control3.add_event( b, e,", "self._control2.value e = b+self.INTERVAL self._control2.add_event( b, e, track=random.randint(0,self.N_TRACKS) ) self._control2.value", "ControlButton('Load button') self.formset = [ ('_btn','_btn1'), ('_control0','_control1'), ('_control2','_control3'), '_txt', ('_save','_load')]", "= self.__btn self._btn1.value = self.__btn1 self._save.value = self.save_window self._load.value =", "-*- __author__ = \"<NAME>\" __credits__ = [\"<NAME>\"] __license__ = \"MIT\"", "b+self.INTERVAL self._control2.add_event( b, e, track=random.randint(0,self.N_TRACKS) ) self._control2.value = e def", ") o = random.randint( 0, 1000 ) self._control0.add_event( s, s+o,", "= \"<NAME>\" __email__ = \"<EMAIL>\" __status__ = \"Development\" from __init__", "__addEvent2(self): b = self._control2.value e = b+self.INTERVAL self._control2.add_event( b, e,", "forms fields self._control0 = ControlEventsGraph('Check me') self._control1 = ControlEventsGraph('Check me')", "def __addEvent2(self): b = self._control2.value e = b+self.INTERVAL self._control2.add_event( b,", "self._btn = ControlButton('Click') self._btn1 = ControlButton('Click 1') self._save = ControlButton('Save", "################################################################################################################## #Execute the application if __name__ == \"__main__\": pyforms.start_app( SimpleExample", "me') self._txt = ControlText('Time') self._btn = ControlButton('Click') self._btn1 = ControlButton('Click", "= self._control1.value e = b+self.INTERVAL self._control1.add_event( b, e, track=random.randint(0,self.N_TRACKS) )", "time.time() timer = QtCore.QTimer(self.form) timer.timeout.connect(self.__addEvent0) timer.start(self.INTERVAL) timer = QtCore.QTimer(self.form) timer.timeout.connect(self.__addEvent1)", "b, e, track=random.randint(0,self.N_TRACKS) ) self._control2.value = e def __addEvent3(self): b", "track=random.randint(0,self.N_TRACKS) ) self._control3.value = e def __btn1(self): self._start = time.time()", "QtCore.QTimer(self.form) timer.timeout.connect(self.__addEvent3) timer.start(self.INTERVAL) ################################################################################################################## ################################################################################################################## ################################################################################################################## #Execute the application if", "timer = QtCore.QTimer(self.form) timer.timeout.connect(self.__addEvent2) timer.start(self.INTERVAL) timer = QtCore.QTimer(self.form) timer.timeout.connect(self.__addEvent3) timer.start(self.INTERVAL)", "= [\"<NAME>\"] __license__ = \"MIT\" __version__ = \"0.0\" __maintainer__ =", "= e def __btn1(self): self._start = time.time() timer = QtCore.QTimer(self.form)", "= random.randint( 0, 1000 ) self._control0.add_event( s, s+o, track=random.randint(0,self.N_TRACKS) )", "= \"0.0\" __maintainer__ = \"<NAME>\" __email__ = \"<EMAIL>\" __status__ =", "self._start = time.time() self.INTERVAL = 500 
self.N_TRACKS = 8 def", "me') self._control2 = ControlEventsGraph('Check me') self._control3 = ControlEventsGraph('Check me') self._txt", "################################################################################################################## ################################################################################################################## #Execute the application if __name__ == \"__main__\": pyforms.start_app(", "e, track=random.randint(0,self.N_TRACKS) ) self._control0.value = e self._txt.value = str(time.time() -", "str(time.time() - self._start) def __addEvent1(self): b = self._control1.value e =", "self._btn1 = ControlButton('Click 1') self._save = ControlButton('Save button') self._load =", "= ControlEventsGraph('Check me') self._control2 = ControlEventsGraph('Check me') self._control3 = ControlEventsGraph('Check", "button') self.formset = [ ('_btn','_btn1'), ('_control0','_control1'), ('_control2','_control3'), '_txt', ('_save','_load')] self._btn.value", "ControlEventsGraph('Check me') self._control2 = ControlEventsGraph('Check me') self._control3 = ControlEventsGraph('Check me')", "QtCore.QTimer(self.form) timer.timeout.connect(self.__addEvent0) timer.start(self.INTERVAL) timer = QtCore.QTimer(self.form) timer.timeout.connect(self.__addEvent1) timer.start(self.INTERVAL) timer =", "b = self._control1.value e = b+self.INTERVAL self._control1.add_event( b, e, track=random.randint(0,self.N_TRACKS)", "self.__btn1 self._save.value = self.save_window self._load.value = self.load_window self._start = time.time()", "__init__(self): super(SimpleExample,self).__init__('Simple example') #Definition of the forms fields self._control0 =", "__btn1(self): self._start = time.time() timer = QtCore.QTimer(self.form) timer.timeout.connect(self.__addEvent0) timer.start(self.INTERVAL) timer", "= str(time.time() - self._start) def __addEvent1(self): b = self._control1.value e", "s, s+o, track=random.randint(0,self.N_TRACKS) ) #self._control0.add_event( random.randint(0, 10000), s+o, track=random.randint(0,self.N_TRACKS), color=\"#00FFDD\")", "__btn(self): for i in range(40): s = random.randint( 0, 10000", ") self._control0.value = e self._txt.value = str(time.time() - self._start) def", "self._control3 = ControlEventsGraph('Check me') self._txt = ControlText('Time') self._btn = ControlButton('Click')", "= self.load_window self._start = time.time() self.INTERVAL = 500 self.N_TRACKS =", "('_control2','_control3'), '_txt', ('_save','_load')] self._btn.value = self.__btn self._btn1.value = self.__btn1 self._save.value", "track=random.randint(0,self.N_TRACKS) ) #self._control0.add_event( random.randint(0, 10000), s+o, track=random.randint(0,self.N_TRACKS), color=\"#00FFDD\") self._control0.value =", "me') self._control1 = ControlEventsGraph('Check me') self._control2 = ControlEventsGraph('Check me') self._control3", "self._control1 = ControlEventsGraph('Check me') self._control2 = ControlEventsGraph('Check me') self._control3 =", "import * import random, time from PyQt4 import QtCore class", "timer.timeout.connect(self.__addEvent0) timer.start(self.INTERVAL) timer = QtCore.QTimer(self.form) timer.timeout.connect(self.__addEvent1) timer.start(self.INTERVAL) timer = QtCore.QTimer(self.form)", "e, track=random.randint(0,self.N_TRACKS) ) self._control2.value = e def __addEvent3(self): b =", "button') self._load = ControlButton('Load button') self.formset = [ ('_btn','_btn1'), ('_control0','_control1'),", "= QtCore.QTimer(self.form) timer.timeout.connect(self.__addEvent2) 
timer.start(self.INTERVAL) timer = QtCore.QTimer(self.form) timer.timeout.connect(self.__addEvent3) timer.start(self.INTERVAL) ##################################################################################################################", "self._control1.add_event( b, e, track=random.randint(0,self.N_TRACKS) ) self._control1.value = e def __addEvent2(self):", "self._btn.value = self.__btn self._btn1.value = self.__btn1 self._save.value = self.save_window self._load.value", "random.randint( 0, 1000 ) self._control0.add_event( s, s+o, track=random.randint(0,self.N_TRACKS) ) #self._control0.add_event(", "self.formset = [ ('_btn','_btn1'), ('_control0','_control1'), ('_control2','_control3'), '_txt', ('_save','_load')] self._btn.value =", "= 8 def __btn(self): for i in range(40): s =", "b, e, track=random.randint(0,self.N_TRACKS) ) self._control0.value = e self._txt.value = str(time.time()", "timer.start(self.INTERVAL) timer = QtCore.QTimer(self.form) timer.timeout.connect(self.__addEvent2) timer.start(self.INTERVAL) timer = QtCore.QTimer(self.form) timer.timeout.connect(self.__addEvent3)", "e, track=random.randint(0,self.N_TRACKS) ) self._control1.value = e def __addEvent2(self): b =", "class SimpleExample(BaseWidget): def __init__(self): super(SimpleExample,self).__init__('Simple example') #Definition of the forms", "self._control0 = ControlEventsGraph('Check me') self._control1 = ControlEventsGraph('Check me') self._control2 =", "\"MIT\" __version__ = \"0.0\" __maintainer__ = \"<NAME>\" __email__ = \"<EMAIL>\"", "__maintainer__ = \"<NAME>\" __email__ = \"<EMAIL>\" __status__ = \"Development\" from", "self._save = ControlButton('Save button') self._load = ControlButton('Load button') self.formset =", "self._control0.add_event( s, s+o, track=random.randint(0,self.N_TRACKS) ) #self._control0.add_event( random.randint(0, 10000), s+o, track=random.randint(0,self.N_TRACKS),", "= b+self.INTERVAL self._control0.add_event( b, e, track=random.randint(0,self.N_TRACKS) ) self._control0.value = e", "self._control2.value = e def __addEvent3(self): b = self._control3.value e =", "self.__btn self._btn1.value = self.__btn1 self._save.value = self.save_window self._load.value = self.load_window", "s = random.randint( 0, 10000 ) o = random.randint( 0,", "= \"MIT\" __version__ = \"0.0\" __maintainer__ = \"<NAME>\" __email__ =", "self._btn1.value = self.__btn1 self._save.value = self.save_window self._load.value = self.load_window self._start", "ControlButton('Click 1') self._save = ControlButton('Save button') self._load = ControlButton('Load button')", "__addEvent0(self): b = self._control0.value e = b+self.INTERVAL self._control0.add_event( b, e,", "self._control1.value e = b+self.INTERVAL self._control1.add_event( b, e, track=random.randint(0,self.N_TRACKS) ) self._control1.value", "__email__ = \"<EMAIL>\" __status__ = \"Development\" from __init__ import *", "1000 ) self._control0.add_event( s, s+o, track=random.randint(0,self.N_TRACKS) ) #self._control0.add_event( random.randint(0, 10000),", ") #self._control0.add_event( random.randint(0, 10000), s+o, track=random.randint(0,self.N_TRACKS), color=\"#00FFDD\") self._control0.value = 5000", "- self._start) def __addEvent1(self): b = self._control1.value e = b+self.INTERVAL", "= b+self.INTERVAL self._control2.add_event( b, e, track=random.randint(0,self.N_TRACKS) ) self._control2.value = e", "ControlEventsGraph('Check me') self._control1 = ControlEventsGraph('Check me') self._control2 = ControlEventsGraph('Check me')", "= QtCore.QTimer(self.form) 
timer.timeout.connect(self.__addEvent0) timer.start(self.INTERVAL) timer = QtCore.QTimer(self.form) timer.timeout.connect(self.__addEvent1) timer.start(self.INTERVAL) timer", "track=random.randint(0,self.N_TRACKS) ) self._control2.value = e def __addEvent3(self): b = self._control3.value", "ControlText('Time') self._btn = ControlButton('Click') self._btn1 = ControlButton('Click 1') self._save =", "self._control1.value = e def __addEvent2(self): b = self._control2.value e =", "= e def __addEvent3(self): b = self._control3.value e = b+self.INTERVAL", "# -*- coding: utf-8 -*- __author__ = \"<NAME>\" __credits__ =", "s+o, track=random.randint(0,self.N_TRACKS), color=\"#00FFDD\") self._control0.value = 5000 def __addEvent0(self): b =", "track=random.randint(0,self.N_TRACKS) ) self._control0.value = e self._txt.value = str(time.time() - self._start)", "timer = QtCore.QTimer(self.form) timer.timeout.connect(self.__addEvent1) timer.start(self.INTERVAL) timer = QtCore.QTimer(self.form) timer.timeout.connect(self.__addEvent2) timer.start(self.INTERVAL)", "i in range(40): s = random.randint( 0, 10000 ) o", "b, e, track=random.randint(0,self.N_TRACKS) ) self._control3.value = e def __btn1(self): self._start", "= QtCore.QTimer(self.form) timer.timeout.connect(self.__addEvent1) timer.start(self.INTERVAL) timer = QtCore.QTimer(self.form) timer.timeout.connect(self.__addEvent2) timer.start(self.INTERVAL) timer", "self.load_window self._start = time.time() self.INTERVAL = 500 self.N_TRACKS = 8", "__init__ import * import random, time from PyQt4 import QtCore", "from PyQt4 import QtCore class SimpleExample(BaseWidget): def __init__(self): super(SimpleExample,self).__init__('Simple example')", "__version__ = \"0.0\" __maintainer__ = \"<NAME>\" __email__ = \"<EMAIL>\" __status__", "SimpleExample(BaseWidget): def __init__(self): super(SimpleExample,self).__init__('Simple example') #Definition of the forms fields", "\"0.0\" __maintainer__ = \"<NAME>\" __email__ = \"<EMAIL>\" __status__ = \"Development\"", "ControlEventsGraph('Check me') self._txt = ControlText('Time') self._btn = ControlButton('Click') self._btn1 =", "ControlButton('Click') self._btn1 = ControlButton('Click 1') self._save = ControlButton('Save button') self._load", "= b+self.INTERVAL self._control1.add_event( b, e, track=random.randint(0,self.N_TRACKS) ) self._control1.value = e", "timer.timeout.connect(self.__addEvent2) timer.start(self.INTERVAL) timer = QtCore.QTimer(self.form) timer.timeout.connect(self.__addEvent3) timer.start(self.INTERVAL) ################################################################################################################## ################################################################################################################## ##################################################################################################################", "color=\"#00FFDD\") self._control0.value = 5000 def __addEvent0(self): b = self._control0.value e", "= ControlButton('Save button') self._load = ControlButton('Load button') self.formset = [", "self._load = ControlButton('Load button') self.formset = [ ('_btn','_btn1'), ('_control0','_control1'), ('_control2','_control3'),", "random, time from PyQt4 import QtCore class SimpleExample(BaseWidget): def __init__(self):", "super(SimpleExample,self).__init__('Simple example') #Definition of the forms fields self._control0 = ControlEventsGraph('Check", "me') self._control3 = ControlEventsGraph('Check me') self._txt = ControlText('Time') self._btn =", "<gh_stars>0 
#!/usr/bin/python # -*- coding: utf-8 -*- __author__ = \"<NAME>\"", "b = self._control0.value e = b+self.INTERVAL self._control0.add_event( b, e, track=random.randint(0,self.N_TRACKS)", "self._control0.value e = b+self.INTERVAL self._control0.add_event( b, e, track=random.randint(0,self.N_TRACKS) ) self._control0.value", "e = b+self.INTERVAL self._control0.add_event( b, e, track=random.randint(0,self.N_TRACKS) ) self._control0.value =", "#self._control0.add_event( random.randint(0, 10000), s+o, track=random.randint(0,self.N_TRACKS), color=\"#00FFDD\") self._control0.value = 5000 def", "e = b+self.INTERVAL self._control1.add_event( b, e, track=random.randint(0,self.N_TRACKS) ) self._control1.value =", "b+self.INTERVAL self._control3.add_event( b, e, track=random.randint(0,self.N_TRACKS) ) self._control3.value = e def", "e, track=random.randint(0,self.N_TRACKS) ) self._control3.value = e def __btn1(self): self._start =", "= \"Development\" from __init__ import * import random, time from", "\"<NAME>\" __email__ = \"<EMAIL>\" __status__ = \"Development\" from __init__ import", "self._load.value = self.load_window self._start = time.time() self.INTERVAL = 500 self.N_TRACKS", "self.N_TRACKS = 8 def __btn(self): for i in range(40): s", ") self._control1.value = e def __addEvent2(self): b = self._control2.value e", "= self.__btn1 self._save.value = self.save_window self._load.value = self.load_window self._start =", "__credits__ = [\"<NAME>\"] __license__ = \"MIT\" __version__ = \"0.0\" __maintainer__", "self._control2 = ControlEventsGraph('Check me') self._control3 = ControlEventsGraph('Check me') self._txt =", "track=random.randint(0,self.N_TRACKS) ) self._control1.value = e def __addEvent2(self): b = self._control2.value", "-*- coding: utf-8 -*- __author__ = \"<NAME>\" __credits__ = [\"<NAME>\"]", "__status__ = \"Development\" from __init__ import * import random, time", "def __addEvent1(self): b = self._control1.value e = b+self.INTERVAL self._control1.add_event( b,", "QtCore.QTimer(self.form) timer.timeout.connect(self.__addEvent1) timer.start(self.INTERVAL) timer = QtCore.QTimer(self.form) timer.timeout.connect(self.__addEvent2) timer.start(self.INTERVAL) timer =", "self._control2.add_event( b, e, track=random.randint(0,self.N_TRACKS) ) self._control2.value = e def __addEvent3(self):", "random.randint(0, 10000), s+o, track=random.randint(0,self.N_TRACKS), color=\"#00FFDD\") self._control0.value = 5000 def __addEvent0(self):", ") self._control3.value = e def __btn1(self): self._start = time.time() timer", "self._control0.value = 5000 def __addEvent0(self): b = self._control0.value e =", "= self._control3.value e = b+self.INTERVAL self._control3.add_event( b, e, track=random.randint(0,self.N_TRACKS) )", "example') #Definition of the forms fields self._control0 = ControlEventsGraph('Check me')", "500 self.N_TRACKS = 8 def __btn(self): for i in range(40):", "b = self._control3.value e = b+self.INTERVAL self._control3.add_event( b, e, track=random.randint(0,self.N_TRACKS)", "coding: utf-8 -*- __author__ = \"<NAME>\" __credits__ = [\"<NAME>\"] __license__", ") self._control0.add_event( s, s+o, track=random.randint(0,self.N_TRACKS) ) #self._control0.add_event( random.randint(0, 10000), s+o,", "def __addEvent0(self): b = self._control0.value e = b+self.INTERVAL self._control0.add_event( b,", "\"<EMAIL>\" __status__ = \"Development\" from __init__ import * import random,", "__author__ = \"<NAME>\" __credits__ = [\"<NAME>\"] __license__ = \"MIT\" __version__", "fields self._control0 = 
ControlEventsGraph('Check me') self._control1 = ControlEventsGraph('Check me') self._control2", "= [ ('_btn','_btn1'), ('_control0','_control1'), ('_control2','_control3'), '_txt', ('_save','_load')] self._btn.value = self.__btn", "__addEvent1(self): b = self._control1.value e = b+self.INTERVAL self._control1.add_event( b, e,", "= time.time() self.INTERVAL = 500 self.N_TRACKS = 8 def __btn(self):", "[\"<NAME>\"] __license__ = \"MIT\" __version__ = \"0.0\" __maintainer__ = \"<NAME>\"", "('_btn','_btn1'), ('_control0','_control1'), ('_control2','_control3'), '_txt', ('_save','_load')] self._btn.value = self.__btn self._btn1.value =", "b = self._control2.value e = b+self.INTERVAL self._control2.add_event( b, e, track=random.randint(0,self.N_TRACKS)", "* import random, time from PyQt4 import QtCore class SimpleExample(BaseWidget):", "timer.timeout.connect(self.__addEvent1) timer.start(self.INTERVAL) timer = QtCore.QTimer(self.form) timer.timeout.connect(self.__addEvent2) timer.start(self.INTERVAL) timer = QtCore.QTimer(self.form)", "self.save_window self._load.value = self.load_window self._start = time.time() self.INTERVAL = 500", "5000 def __addEvent0(self): b = self._control0.value e = b+self.INTERVAL self._control0.add_event(", "QtCore.QTimer(self.form) timer.timeout.connect(self.__addEvent2) timer.start(self.INTERVAL) timer = QtCore.QTimer(self.form) timer.timeout.connect(self.__addEvent3) timer.start(self.INTERVAL) ################################################################################################################## ##################################################################################################################", "b+self.INTERVAL self._control1.add_event( b, e, track=random.randint(0,self.N_TRACKS) ) self._control1.value = e def", "= ControlEventsGraph('Check me') self._control1 = ControlEventsGraph('Check me') self._control2 = ControlEventsGraph('Check", "0, 10000 ) o = random.randint( 0, 1000 ) self._control0.add_event(", "def __btn1(self): self._start = time.time() timer = QtCore.QTimer(self.form) timer.timeout.connect(self.__addEvent0) timer.start(self.INTERVAL)", "def __addEvent3(self): b = self._control3.value e = b+self.INTERVAL self._control3.add_event( b,", "the forms fields self._control0 = ControlEventsGraph('Check me') self._control1 = ControlEventsGraph('Check", "#!/usr/bin/python # -*- coding: utf-8 -*- __author__ = \"<NAME>\" __credits__", "time from PyQt4 import QtCore class SimpleExample(BaseWidget): def __init__(self): super(SimpleExample,self).__init__('Simple", "'_txt', ('_save','_load')] self._btn.value = self.__btn self._btn1.value = self.__btn1 self._save.value =", "timer.timeout.connect(self.__addEvent3) timer.start(self.INTERVAL) ################################################################################################################## ################################################################################################################## ################################################################################################################## #Execute the application if __name__", "timer.start(self.INTERVAL) ################################################################################################################## ################################################################################################################## ################################################################################################################## #Execute the application if __name__ ==", "for i in range(40): 
s = random.randint( 0, 10000 )", "= e self._txt.value = str(time.time() - self._start) def __addEvent1(self): b", "= self.save_window self._load.value = self.load_window self._start = time.time() self.INTERVAL =", "10000 ) o = random.randint( 0, 1000 ) self._control0.add_event( s,", "= QtCore.QTimer(self.form) timer.timeout.connect(self.__addEvent3) timer.start(self.INTERVAL) ################################################################################################################## ################################################################################################################## ################################################################################################################## #Execute the application", "ControlEventsGraph('Check me') self._control3 = ControlEventsGraph('Check me') self._txt = ControlText('Time') self._btn", "= time.time() timer = QtCore.QTimer(self.form) timer.timeout.connect(self.__addEvent0) timer.start(self.INTERVAL) timer = QtCore.QTimer(self.form)", "10000), s+o, track=random.randint(0,self.N_TRACKS), color=\"#00FFDD\") self._control0.value = 5000 def __addEvent0(self): b", "QtCore class SimpleExample(BaseWidget): def __init__(self): super(SimpleExample,self).__init__('Simple example') #Definition of the", "= 500 self.N_TRACKS = 8 def __btn(self): for i in", "e = b+self.INTERVAL self._control3.add_event( b, e, track=random.randint(0,self.N_TRACKS) ) self._control3.value =", "self._start) def __addEvent1(self): b = self._control1.value e = b+self.INTERVAL self._control1.add_event(", "range(40): s = random.randint( 0, 10000 ) o = random.randint(", "('_save','_load')] self._btn.value = self.__btn self._btn1.value = self.__btn1 self._save.value = self.save_window", "1') self._save = ControlButton('Save button') self._load = ControlButton('Load button') self.formset" ]
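# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original example: the commented-out
# call in __btn suggests add_event() also accepts a `color` keyword taking a
# hex string. Assuming that keyword exists, the same event generator with an
# explicit color could be written as a subclass (ColoredExample and
# fill_colored are hypothetical names):
class ColoredExample(SimpleExample):

    def fill_colored(self):
        # Identical to __btn, plus the color keyword hinted at above
        for _ in range(40):
            s = random.randint(0, 10000)
            o = random.randint(0, 1000)
            self._control0.add_event(
                s, s + o,
                track=random.randint(0, self.N_TRACKS),
                color="#00FFDD")
        self._control0.value = 5000
# ---------------------------------------------------------------------------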
[ "# pyre-fixme[16]: `OrthoPhotoView` has no attribute `geot`. x, y =", "x += self.image_window.col_off y += self.image_window.row_off h, w = self.image_manager.get_image_size(self.current_image)", "if self.images_in_list: if self.current_image not in self.images_in_list: self.bring_new_image(self.images_in_list[0]) else: self.bring_new_image(self.current_image)", "# Pixel to whatever crs the image is in #", "coords.tolist() def refocus(self, lat, lon): self.center_lat = lat self.center_lon =", "`OrthoPhotoView` has no attribute `geot`. x, y = self.geot.xy(y, x)", "if self.current_image not in self.images_in_list: self.bring_new_image(self.images_in_list[0]) else: self.bring_new_image(self.current_image) self.set_title() def", "self.bring_new_image(self.images_in_list[0]) else: self.bring_new_image(self.current_image) self.set_title() def bring_new_image(self, new_image): super(OrthoPhotoView, self).bring_new_image(new_image, force=True)", "to pixels (in the viewing window) \"\"\" h, w =", "TODO add widget for zoom level super(OrthoPhotoView, self).__init__(main_ui, False) self.refocus(init_lat,", "whole geotiff) to pixels (in the viewing window) \"\"\" h,", "= lon self.populate_image_list() if self.images_in_list: if self.current_image not in self.images_in_list:", "to latlon \"\"\" if not self.is_geo_reference: return None # Pixel", "[x], [y]) return lats[0], lons[0] def gcp_to_pixel_coordinates(self, x: float, y:", "the viewing window) \"\"\" h, w = self.image_manager.get_image_size(self.current_image) px =", "= features.denormalized_image_coordinates(np.array([[x, y]]), w, h)[0] # pyre-fixme[16]: `OrthoPhotoView` has no", "if not self.is_geo_reference: return None # Pixel to whatever crs", "float]: \"\"\" Transforms from pixels (in the viewing window) to", "class OrthoPhotoView(View): def __init__( self, main_ui, path: str, init_lat: float,", "float, init_lon: float, is_geo_reference: bool = False, ): \"\"\"[summary] Args:", "float) -> Tuple[float, float]: \"\"\" Transforms from normalized coordinates (in", "to normalized coordinates (in the whole geotiff) \"\"\" # pyre-fixme[16]:", "self.ax.plot(np.mean(xlim), np.mean(ylim), \"rx\") self.plt_artists.extend(artists) self.canvas.draw_idle() def set_title(self): lat, lon =", "self.center_lon = lon self.populate_image_list() if self.images_in_list: if self.current_image not in", "f\"{t} [{seq_ix+1}/{len(self.images_in_list)}]: {shot}\" else: title = f\"No orthophotos around {lat},", "lat:{:.4f}, lon:{:.4f}\".format(lat, lon) shot = self.current_image seq_ix = self.images_in_list.index(shot) title", "px[0] - self.image_window.col_off y = px[1] - self.image_window.row_off # pyre-fixme[7]:", "self.bring_new_image(self.current_image) self.set_title() def bring_new_image(self, new_image): super(OrthoPhotoView, self).bring_new_image(new_image, force=True) xlim =", "x = px[0] - self.image_window.col_off y = px[1] - self.image_window.row_off", "no attribute `image_window`. 
x += self.image_window.col_off y += self.image_window.row_off h,", "\"\"\" self.image_manager = OrthoPhotoManager(path, 100.0) self.images_in_list = self.image_manager.image_keys self.zoom_window_size_px =", "bool = False, ): \"\"\"[summary] Args: main_ui (GUI.Gui) path (str):", "\"\"\" if not self.is_geo_reference: return None # Pixel to whatever", "self.set_title() def get_image(self, new_image): crop, image_window, geot = self.image_manager.read_image_around_latlon( new_image,", "+= self.image_window.col_off y += self.image_window.row_off h, w = self.image_manager.get_image_size(self.current_image) coords", ") def pixel_to_latlon(self, x: float, y: float): \"\"\" From pixels", "coordinates (in the whole geotiff) \"\"\" # pyre-fixme[16]: `OrthoPhotoView` has", "\"Images covering lat:{:.4f}, lon:{:.4f}\".format(lat, lon) shot = self.current_image seq_ix =", "self.size ) def pixel_to_latlon(self, x: float, y: float): \"\"\" From", "str, init_lat: float, init_lon: float, is_geo_reference: bool = False, ):", "def get_candidate_images(self): return self.image_manager.get_candidate_images( self.center_lat, self.center_lon, self.size ) def pixel_to_latlon(self,", "= self.image_manager.get_image_size(self.current_image) px = features.denormalized_image_coordinates(np.array([[x, y]]), w, h)[0] # pyre-fixme[16]:", "got `List[typing.Any]`. return [x, y] def pixel_to_gcp_coordinates(self, x: float, y:", "to WSG84 (lat/lon) lons, lats = rasterio.warp.transform(self.geot.crs, \"EPSG:4326\", [x], [y])", "self.geot.xy(y, x) # And then to WSG84 (lat/lon) lons, lats", "= \"Images covering lat:{:.4f}, lon:{:.4f}\".format(lat, lon) shot = self.current_image seq_ix", "features.denormalized_image_coordinates(np.array([[x, y]]), w, h)[0] # pyre-fixme[16]: `OrthoPhotoView` has no attribute", "pyre-fixme[16]: `OrthoPhotoView` has no attribute `image_window`. x = px[0] -", "coordinates (in the whole geotiff) to pixels (in the viewing", "from .orthophoto_manager import OrthoPhotoManager from .view import View class OrthoPhotoView(View):", "the viewing window) to normalized coordinates (in the whole geotiff)", "= self.current_image seq_ix = self.images_in_list.index(shot) title = f\"{t} [{seq_ix+1}/{len(self.images_in_list)}]: {shot}\"", "in # pyre-fixme[16]: `OrthoPhotoView` has no attribute `geot`. x, y", "= self.center_lat, self.center_lon if self.images_in_list: t = \"Images covering lat:{:.4f},", "`geot`. x, y = self.geot.xy(y, x) # And then to", "self.image_manager.get_image_size(self.current_image) coords = features.normalized_image_coordinates(np.array([[x, y]]), w, h)[0] return coords.tolist() def", "self.images_in_list.index(shot) title = f\"{t} [{seq_ix+1}/{len(self.images_in_list)}]: {shot}\" else: title = f\"No", "Transforms from normalized coordinates (in the whole geotiff) to pixels", "# pyre-fixme[7]: Expected `Tuple[float, float]` but got `List[typing.Any]`. return [x,", "opensfm import features from .orthophoto_manager import OrthoPhotoManager from .view import", "f\"No orthophotos around {lat}, {lon}\" self.current_image = None self.ax.clear() self.ax.axis(\"off\")", "from .view import View class OrthoPhotoView(View): def __init__( self, main_ui,", "-> Tuple[float, float]: \"\"\" Transforms from pixels (in the viewing", "is in # pyre-fixme[16]: `OrthoPhotoView` has no attribute `geot`. 
x,", "self.image_manager.get_candidate_images( self.center_lat, self.center_lon, self.size ) def pixel_to_latlon(self, x: float, y:", "return lats[0], lons[0] def gcp_to_pixel_coordinates(self, x: float, y: float) ->", "window) to normalized coordinates (in the whole geotiff) \"\"\" #", "return [x, y] def pixel_to_gcp_coordinates(self, x: float, y: float) ->", "init_lon) self.populate_image_list() if self.images_in_list: self.bring_new_image(self.images_in_list[0]) self.set_title() def get_image(self, new_image): crop,", "None # Pixel to whatever crs the image is in", "Tuple import numpy as np import rasterio.warp from opensfm import", "self.populate_image_list() if self.images_in_list: self.bring_new_image(self.images_in_list[0]) self.set_title() def get_image(self, new_image): crop, image_window,", "pixels (in the viewing window) to normalized coordinates (in the", "self.plt_artists.extend(artists) self.canvas.draw_idle() def set_title(self): lat, lon = self.center_lat, self.center_lon if", "lon) shot = self.current_image seq_ix = self.images_in_list.index(shot) title = f\"{t}", "lats = rasterio.warp.transform(self.geot.crs, \"EPSG:4326\", [x], [y]) return lats[0], lons[0] def", "50 # TODO add widget for zoom level super(OrthoPhotoView, self).__init__(main_ui,", "Tuple[float, float]: \"\"\" Transforms from normalized coordinates (in the whole", "zoom level super(OrthoPhotoView, self).__init__(main_ui, False) self.refocus(init_lat, init_lon) self.populate_image_list() if self.images_in_list:", "self.ax.get_xlim() ylim = self.ax.get_ylim() artists = self.ax.plot(np.mean(xlim), np.mean(ylim), \"rx\") self.plt_artists.extend(artists)", "Expected `Tuple[float, float]` but got `List[typing.Any]`. return [x, y] def", "`image_window`. x += self.image_window.col_off y += self.image_window.row_off h, w =", "self.images_in_list: self.bring_new_image(self.images_in_list[0]) else: self.bring_new_image(self.current_image) self.set_title() def bring_new_image(self, new_image): super(OrthoPhotoView, self).bring_new_image(new_image,", "\"rx\") self.plt_artists.extend(artists) self.canvas.draw_idle() def set_title(self): lat, lon = self.center_lat, self.center_lon", "`List[typing.Any]`. return [x, y] def pixel_to_gcp_coordinates(self, x: float, y: float)", "= OrthoPhotoManager(path, 100.0) self.images_in_list = self.image_manager.image_keys self.zoom_window_size_px = 500 self.is_geo_reference", "is_geo_reference self.size = 50 # TODO add widget for zoom", "seq_ix = self.images_in_list.index(shot) title = f\"{t} [{seq_ix+1}/{len(self.images_in_list)}]: {shot}\" else: title", "\"\"\" # pyre-fixme[16]: `OrthoPhotoView` has no attribute `image_window`. x +=", "def refocus(self, lat, lon): self.center_lat = lat self.center_lon = lon", "self.image_manager = OrthoPhotoManager(path, 100.0) self.images_in_list = self.image_manager.image_keys self.zoom_window_size_px = 500", "pyre-fixme[7]: Expected `Tuple[float, float]` but got `List[typing.Any]`. return [x, y]", "self.center_lat, self.center_lon if self.images_in_list: t = \"Images covering lat:{:.4f}, lon:{:.4f}\".format(lat,", "window) \"\"\" h, w = self.image_manager.get_image_size(self.current_image) px = features.denormalized_image_coordinates(np.array([[x, y]]),", "float): \"\"\" From pixels (in the viewing window) to latlon", "geotiffs \"\"\" self.image_manager = OrthoPhotoManager(path, 100.0) self.images_in_list = self.image_manager.image_keys self.zoom_window_size_px", "attribute `geot`. 
x, y = self.geot.xy(y, x) # And then", "self.images_in_list: t = \"Images covering lat:{:.4f}, lon:{:.4f}\".format(lat, lon) shot =", "w = self.image_manager.get_image_size(self.current_image) px = features.denormalized_image_coordinates(np.array([[x, y]]), w, h)[0] #", "crop, image_window, geot = self.image_manager.read_image_around_latlon( new_image, self.center_lat, self.center_lon, self.size )", "h, w = self.image_manager.get_image_size(self.current_image) coords = features.normalized_image_coordinates(np.array([[x, y]]), w, h)[0]", "OrthoPhotoView(View): def __init__( self, main_ui, path: str, init_lat: float, init_lon:", "(GUI.Gui) path (str): path containing geotiffs \"\"\" self.image_manager = OrthoPhotoManager(path,", "# TODO add widget for zoom level super(OrthoPhotoView, self).__init__(main_ui, False)", "lon:{:.4f}\".format(lat, lon) shot = self.current_image seq_ix = self.images_in_list.index(shot) title =", "(in the viewing window) \"\"\" h, w = self.image_manager.get_image_size(self.current_image) px", "[{seq_ix+1}/{len(self.images_in_list)}]: {shot}\" else: title = f\"No orthophotos around {lat}, {lon}\"", "numpy as np import rasterio.warp from opensfm import features from", "x: float, y: float): \"\"\" From pixels (in the viewing", "x) # And then to WSG84 (lat/lon) lons, lats =", "= px[0] - self.image_window.col_off y = px[1] - self.image_window.row_off #", "artists = self.ax.plot(np.mean(xlim), np.mean(ylim), \"rx\") self.plt_artists.extend(artists) self.canvas.draw_idle() def set_title(self): lat,", "return None # Pixel to whatever crs the image is", "pyre-fixme[16]: `OrthoPhotoView` has no attribute `geot`. x, y = self.geot.xy(y,", "import View class OrthoPhotoView(View): def __init__( self, main_ui, path: str,", "\"\"\" Transforms from pixels (in the viewing window) to normalized", "self.ax.get_ylim() artists = self.ax.plot(np.mean(xlim), np.mean(ylim), \"rx\") self.plt_artists.extend(artists) self.canvas.draw_idle() def set_title(self):", "): \"\"\"[summary] Args: main_ui (GUI.Gui) path (str): path containing geotiffs", "# pyre-fixme[16]: `OrthoPhotoView` has no attribute `image_window`. x = px[0]", "And then to WSG84 (lat/lon) lons, lats = rasterio.warp.transform(self.geot.crs, \"EPSG:4326\",", "self.image_window = image_window self.geot = geot return crop def get_candidate_images(self):", "geotiff) \"\"\" # pyre-fixme[16]: `OrthoPhotoView` has no attribute `image_window`. x", "h)[0] # pyre-fixme[16]: `OrthoPhotoView` has no attribute `image_window`. 
x =", "x: float, y: float) -> Tuple[float, float]: \"\"\" Transforms from", "from opensfm import features from .orthophoto_manager import OrthoPhotoManager from .view", "viewing window) to normalized coordinates (in the whole geotiff) \"\"\"", "def get_image(self, new_image): crop, image_window, geot = self.image_manager.read_image_around_latlon( new_image, self.center_lat,", "+= self.image_window.row_off h, w = self.image_manager.get_image_size(self.current_image) coords = features.normalized_image_coordinates(np.array([[x, y]]),", "add widget for zoom level super(OrthoPhotoView, self).__init__(main_ui, False) self.refocus(init_lat, init_lon)", "= px[1] - self.image_window.row_off # pyre-fixme[7]: Expected `Tuple[float, float]` but", "ylim = self.ax.get_ylim() artists = self.ax.plot(np.mean(xlim), np.mean(ylim), \"rx\") self.plt_artists.extend(artists) self.canvas.draw_idle()", "the whole geotiff) \"\"\" # pyre-fixme[16]: `OrthoPhotoView` has no attribute", "self, main_ui, path: str, init_lat: float, init_lon: float, is_geo_reference: bool", "title = f\"{t} [{seq_ix+1}/{len(self.images_in_list)}]: {shot}\" else: title = f\"No orthophotos", "self.populate_image_list() if self.images_in_list: if self.current_image not in self.images_in_list: self.bring_new_image(self.images_in_list[0]) else:", "(in the whole geotiff) \"\"\" # pyre-fixme[16]: `OrthoPhotoView` has no", "[x, y] def pixel_to_gcp_coordinates(self, x: float, y: float) -> Tuple[float,", "(in the viewing window) to normalized coordinates (in the whole", "# pyre-fixme[16]: `OrthoPhotoView` has no attribute `image_window`. x += self.image_window.col_off", "def pixel_to_gcp_coordinates(self, x: float, y: float) -> Tuple[float, float]: \"\"\"", "pixels (in the viewing window) to latlon \"\"\" if not", "self.center_lon, self.size ) self.image_window = image_window self.geot = geot return", "around {lat}, {lon}\" self.current_image = None self.ax.clear() self.ax.axis(\"off\") self.canvas.draw_idle() self.window.title(title)", "self.is_geo_reference = is_geo_reference self.size = 50 # TODO add widget", "def __init__( self, main_ui, path: str, init_lat: float, init_lon: float,", "y] def pixel_to_gcp_coordinates(self, x: float, y: float) -> Tuple[float, float]:", "From pixels (in the viewing window) to latlon \"\"\" if", "self).__init__(main_ui, False) self.refocus(init_lat, init_lon) self.populate_image_list() if self.images_in_list: self.bring_new_image(self.images_in_list[0]) self.set_title() def", "= features.normalized_image_coordinates(np.array([[x, y]]), w, h)[0] return coords.tolist() def refocus(self, lat,", "pyre-fixme[16]: `OrthoPhotoView` has no attribute `image_window`. 
x += self.image_window.col_off y", "refocus(self, lat, lon): self.center_lat = lat self.center_lon = lon self.populate_image_list()", "path (str): path containing geotiffs \"\"\" self.image_manager = OrthoPhotoManager(path, 100.0)", "self.images_in_list: if self.current_image not in self.images_in_list: self.bring_new_image(self.images_in_list[0]) else: self.bring_new_image(self.current_image) self.set_title()", "(in the whole geotiff) to pixels (in the viewing window)", "= is_geo_reference self.size = 50 # TODO add widget for", "else: title = f\"No orthophotos around {lat}, {lon}\" self.current_image =", "def bring_new_image(self, new_image): super(OrthoPhotoView, self).bring_new_image(new_image, force=True) xlim = self.ax.get_xlim() ylim", "self.zoom_window_size_px = 500 self.is_geo_reference = is_geo_reference self.size = 50 #", "normalized coordinates (in the whole geotiff) \"\"\" # pyre-fixme[16]: `OrthoPhotoView`", "{shot}\" else: title = f\"No orthophotos around {lat}, {lon}\" self.current_image", "= self.image_manager.get_image_size(self.current_image) coords = features.normalized_image_coordinates(np.array([[x, y]]), w, h)[0] return coords.tolist()", "self.geot = geot return crop def get_candidate_images(self): return self.image_manager.get_candidate_images( self.center_lat,", "xlim = self.ax.get_xlim() ylim = self.ax.get_ylim() artists = self.ax.plot(np.mean(xlim), np.mean(ylim),", "not in self.images_in_list: self.bring_new_image(self.images_in_list[0]) else: self.bring_new_image(self.current_image) self.set_title() def bring_new_image(self, new_image):", "viewing window) to latlon \"\"\" if not self.is_geo_reference: return None", "False) self.refocus(init_lat, init_lon) self.populate_image_list() if self.images_in_list: self.bring_new_image(self.images_in_list[0]) self.set_title() def get_image(self,", "self.size ) self.image_window = image_window self.geot = geot return crop", "pixels (in the viewing window) \"\"\" h, w = self.image_manager.get_image_size(self.current_image)", "self.center_lon if self.images_in_list: t = \"Images covering lat:{:.4f}, lon:{:.4f}\".format(lat, lon)", "= self.ax.plot(np.mean(xlim), np.mean(ylim), \"rx\") self.plt_artists.extend(artists) self.canvas.draw_idle() def set_title(self): lat, lon", "super(OrthoPhotoView, self).__init__(main_ui, False) self.refocus(init_lat, init_lon) self.populate_image_list() if self.images_in_list: self.bring_new_image(self.images_in_list[0]) self.set_title()", "crop def get_candidate_images(self): return self.image_manager.get_candidate_images( self.center_lat, self.center_lon, self.size ) def", "= self.ax.get_xlim() ylim = self.ax.get_ylim() artists = self.ax.plot(np.mean(xlim), np.mean(ylim), \"rx\")", "new_image, self.center_lat, self.center_lon, self.size ) self.image_window = image_window self.geot =", "return self.image_manager.get_candidate_images( self.center_lat, self.center_lon, self.size ) def pixel_to_latlon(self, x: float,", "init_lon: float, is_geo_reference: bool = False, ): \"\"\"[summary] Args: main_ui", "self.images_in_list: self.bring_new_image(self.images_in_list[0]) self.set_title() def get_image(self, new_image): crop, image_window, geot =", "y += self.image_window.row_off h, w = self.image_manager.get_image_size(self.current_image) coords = features.normalized_image_coordinates(np.array([[x,", "image is in # pyre-fixme[16]: `OrthoPhotoView` has no attribute `geot`.", "self).bring_new_image(new_image, force=True) xlim = self.ax.get_xlim() ylim = self.ax.get_ylim() artists =", "level 
super(OrthoPhotoView, self).__init__(main_ui, False) self.refocus(init_lat, init_lon) self.populate_image_list() if self.images_in_list: self.bring_new_image(self.images_in_list[0])", "import numpy as np import rasterio.warp from opensfm import features", "widget for zoom level super(OrthoPhotoView, self).__init__(main_ui, False) self.refocus(init_lat, init_lon) self.populate_image_list()", "self.current_image seq_ix = self.images_in_list.index(shot) title = f\"{t} [{seq_ix+1}/{len(self.images_in_list)}]: {shot}\" else:", "title = f\"No orthophotos around {lat}, {lon}\" self.current_image = None", "= 500 self.is_geo_reference = is_geo_reference self.size = 50 # TODO", "float, y: float) -> Tuple[float, float]: \"\"\" Transforms from normalized", "(in the viewing window) to latlon \"\"\" if not self.is_geo_reference:", "path containing geotiffs \"\"\" self.image_manager = OrthoPhotoManager(path, 100.0) self.images_in_list =", "\"\"\" From pixels (in the viewing window) to latlon \"\"\"", "np.mean(ylim), \"rx\") self.plt_artists.extend(artists) self.canvas.draw_idle() def set_title(self): lat, lon = self.center_lat,", "(str): path containing geotiffs \"\"\" self.image_manager = OrthoPhotoManager(path, 100.0) self.images_in_list", "viewing window) \"\"\" h, w = self.image_manager.get_image_size(self.current_image) px = features.denormalized_image_coordinates(np.array([[x,", "float) -> Tuple[float, float]: \"\"\" Transforms from pixels (in the", "image_window, geot = self.image_manager.read_image_around_latlon( new_image, self.center_lat, self.center_lon, self.size ) self.image_window", "import Tuple import numpy as np import rasterio.warp from opensfm", "= self.ax.get_ylim() artists = self.ax.plot(np.mean(xlim), np.mean(ylim), \"rx\") self.plt_artists.extend(artists) self.canvas.draw_idle() def", "geot = self.image_manager.read_image_around_latlon( new_image, self.center_lat, self.center_lon, self.size ) self.image_window =", "[y]) return lats[0], lons[0] def gcp_to_pixel_coordinates(self, x: float, y: float)", "lat, lon = self.center_lat, self.center_lon if self.images_in_list: t = \"Images", "normalized coordinates (in the whole geotiff) to pixels (in the", "x, y = self.geot.xy(y, x) # And then to WSG84", "get_candidate_images(self): return self.image_manager.get_candidate_images( self.center_lat, self.center_lon, self.size ) def pixel_to_latlon(self, x:", "w = self.image_manager.get_image_size(self.current_image) coords = features.normalized_image_coordinates(np.array([[x, y]]), w, h)[0] return", "y = self.geot.xy(y, x) # And then to WSG84 (lat/lon)", "\"\"\"[summary] Args: main_ui (GUI.Gui) path (str): path containing geotiffs \"\"\"", "px = features.denormalized_image_coordinates(np.array([[x, y]]), w, h)[0] # pyre-fixme[16]: `OrthoPhotoView` has", "`OrthoPhotoView` has no attribute `image_window`. x += self.image_window.col_off y +=", "attribute `image_window`. 
x += self.image_window.col_off y += self.image_window.row_off h, w", "self.image_window.col_off y = px[1] - self.image_window.row_off # pyre-fixme[7]: Expected `Tuple[float,", "window) to latlon \"\"\" if not self.is_geo_reference: return None #", "super(OrthoPhotoView, self).bring_new_image(new_image, force=True) xlim = self.ax.get_xlim() ylim = self.ax.get_ylim() artists", "init_lat: float, init_lon: float, is_geo_reference: bool = False, ): \"\"\"[summary]", "rasterio.warp.transform(self.geot.crs, \"EPSG:4326\", [x], [y]) return lats[0], lons[0] def gcp_to_pixel_coordinates(self, x:", "\"\"\" h, w = self.image_manager.get_image_size(self.current_image) px = features.denormalized_image_coordinates(np.array([[x, y]]), w,", "self.center_lat = lat self.center_lon = lon self.populate_image_list() if self.images_in_list: if", "features from .orthophoto_manager import OrthoPhotoManager from .view import View class", "self.image_manager.read_image_around_latlon( new_image, self.center_lat, self.center_lon, self.size ) self.image_window = image_window self.geot", ".view import View class OrthoPhotoView(View): def __init__( self, main_ui, path:", "-> Tuple[float, float]: \"\"\" Transforms from normalized coordinates (in the", "the image is in # pyre-fixme[16]: `OrthoPhotoView` has no attribute", "Pixel to whatever crs the image is in # pyre-fixme[16]:", "500 self.is_geo_reference = is_geo_reference self.size = 50 # TODO add", "self.is_geo_reference: return None # Pixel to whatever crs the image", "if self.images_in_list: t = \"Images covering lat:{:.4f}, lon:{:.4f}\".format(lat, lon) shot", "has no attribute `geot`. x, y = self.geot.xy(y, x) #", "= lat self.center_lon = lon self.populate_image_list() if self.images_in_list: if self.current_image", "attribute `image_window`. 
x = px[0] - self.image_window.col_off y = px[1]", "= 50 # TODO add widget for zoom level super(OrthoPhotoView,", "the viewing window) to latlon \"\"\" if not self.is_geo_reference: return", "if self.images_in_list: self.bring_new_image(self.images_in_list[0]) self.set_title() def get_image(self, new_image): crop, image_window, geot", "import features from .orthophoto_manager import OrthoPhotoManager from .view import View", "lons, lats = rasterio.warp.transform(self.geot.crs, \"EPSG:4326\", [x], [y]) return lats[0], lons[0]", "the whole geotiff) to pixels (in the viewing window) \"\"\"", "= image_window self.geot = geot return crop def get_candidate_images(self): return", "# And then to WSG84 (lat/lon) lons, lats = rasterio.warp.transform(self.geot.crs,", "= geot return crop def get_candidate_images(self): return self.image_manager.get_candidate_images( self.center_lat, self.center_lon,", "from normalized coordinates (in the whole geotiff) to pixels (in", "lons[0] def gcp_to_pixel_coordinates(self, x: float, y: float) -> Tuple[float, float]:", "= f\"{t} [{seq_ix+1}/{len(self.images_in_list)}]: {shot}\" else: title = f\"No orthophotos around", "geot return crop def get_candidate_images(self): return self.image_manager.get_candidate_images( self.center_lat, self.center_lon, self.size", "crs the image is in # pyre-fixme[16]: `OrthoPhotoView` has no", "from pixels (in the viewing window) to normalized coordinates (in", "features.normalized_image_coordinates(np.array([[x, y]]), w, h)[0] return coords.tolist() def refocus(self, lat, lon):", "t = \"Images covering lat:{:.4f}, lon:{:.4f}\".format(lat, lon) shot = self.current_image", "main_ui, path: str, init_lat: float, init_lon: float, is_geo_reference: bool =", "path: str, init_lat: float, init_lon: float, is_geo_reference: bool = False,", "__init__( self, main_ui, path: str, init_lat: float, init_lon: float, is_geo_reference:", "self.center_lat, self.center_lon, self.size ) self.image_window = image_window self.geot = geot", "y: float): \"\"\" From pixels (in the viewing window) to", "has no attribute `image_window`. x += self.image_window.col_off y += self.image_window.row_off", ") self.image_window = image_window self.geot = geot return crop def", "lat self.center_lon = lon self.populate_image_list() if self.images_in_list: if self.current_image not", "covering lat:{:.4f}, lon:{:.4f}\".format(lat, lon) shot = self.current_image seq_ix = self.images_in_list.index(shot)", "- self.image_window.col_off y = px[1] - self.image_window.row_off # pyre-fixme[7]: Expected", "def pixel_to_latlon(self, x: float, y: float): \"\"\" From pixels (in", "self.current_image not in self.images_in_list: self.bring_new_image(self.images_in_list[0]) else: self.bring_new_image(self.current_image) self.set_title() def bring_new_image(self,", "image_window self.geot = geot return crop def get_candidate_images(self): return self.image_manager.get_candidate_images(", "def gcp_to_pixel_coordinates(self, x: float, y: float) -> Tuple[float, float]: \"\"\"", "= self.images_in_list.index(shot) title = f\"{t} [{seq_ix+1}/{len(self.images_in_list)}]: {shot}\" else: title =", "no attribute `image_window`. 
x = px[0] - self.image_window.col_off y =", "coords = features.normalized_image_coordinates(np.array([[x, y]]), w, h)[0] return coords.tolist() def refocus(self,", "WSG84 (lat/lon) lons, lats = rasterio.warp.transform(self.geot.crs, \"EPSG:4326\", [x], [y]) return", "get_image(self, new_image): crop, image_window, geot = self.image_manager.read_image_around_latlon( new_image, self.center_lat, self.center_lon,", "orthophotos around {lat}, {lon}\" self.current_image = None self.ax.clear() self.ax.axis(\"off\") self.canvas.draw_idle()", "float]: \"\"\" Transforms from normalized coordinates (in the whole geotiff)", "w, h)[0] return coords.tolist() def refocus(self, lat, lon): self.center_lat =", "is_geo_reference: bool = False, ): \"\"\"[summary] Args: main_ui (GUI.Gui) path", "not self.is_geo_reference: return None # Pixel to whatever crs the", "self.canvas.draw_idle() def set_title(self): lat, lon = self.center_lat, self.center_lon if self.images_in_list:", "Tuple[float, float]: \"\"\" Transforms from pixels (in the viewing window)", "= rasterio.warp.transform(self.geot.crs, \"EPSG:4326\", [x], [y]) return lats[0], lons[0] def gcp_to_pixel_coordinates(self,", "y]]), w, h)[0] # pyre-fixme[16]: `OrthoPhotoView` has no attribute `image_window`.", "whole geotiff) \"\"\" # pyre-fixme[16]: `OrthoPhotoView` has no attribute `image_window`.", "float, y: float) -> Tuple[float, float]: \"\"\" Transforms from pixels", "return crop def get_candidate_images(self): return self.image_manager.get_candidate_images( self.center_lat, self.center_lon, self.size )", "latlon \"\"\" if not self.is_geo_reference: return None # Pixel to", "force=True) xlim = self.ax.get_xlim() ylim = self.ax.get_ylim() artists = self.ax.plot(np.mean(xlim),", "h)[0] return coords.tolist() def refocus(self, lat, lon): self.center_lat = lat", "self.set_title() def bring_new_image(self, new_image): super(OrthoPhotoView, self).bring_new_image(new_image, force=True) xlim = self.ax.get_xlim()", "= self.image_manager.image_keys self.zoom_window_size_px = 500 self.is_geo_reference = is_geo_reference self.size =", "but got `List[typing.Any]`. return [x, y] def pixel_to_gcp_coordinates(self, x: float,", "= False, ): \"\"\"[summary] Args: main_ui (GUI.Gui) path (str): path", "`image_window`. 
x = px[0] - self.image_window.col_off y = px[1] -", "geotiff) to pixels (in the viewing window) \"\"\" h, w", "= self.geot.xy(y, x) # And then to WSG84 (lat/lon) lons,", "OrthoPhotoManager from .view import View class OrthoPhotoView(View): def __init__( self,", "in self.images_in_list: self.bring_new_image(self.images_in_list[0]) else: self.bring_new_image(self.current_image) self.set_title() def bring_new_image(self, new_image): super(OrthoPhotoView,", "from typing import Tuple import numpy as np import rasterio.warp", "return coords.tolist() def refocus(self, lat, lon): self.center_lat = lat self.center_lon", "set_title(self): lat, lon = self.center_lat, self.center_lon if self.images_in_list: t =", "self.image_manager.image_keys self.zoom_window_size_px = 500 self.is_geo_reference = is_geo_reference self.size = 50", "then to WSG84 (lat/lon) lons, lats = rasterio.warp.transform(self.geot.crs, \"EPSG:4326\", [x],", "self.image_window.col_off y += self.image_window.row_off h, w = self.image_manager.get_image_size(self.current_image) coords =", "lon self.populate_image_list() if self.images_in_list: if self.current_image not in self.images_in_list: self.bring_new_image(self.images_in_list[0])", "px[1] - self.image_window.row_off # pyre-fixme[7]: Expected `Tuple[float, float]` but got", "Args: main_ui (GUI.Gui) path (str): path containing geotiffs \"\"\" self.image_manager", "whatever crs the image is in # pyre-fixme[16]: `OrthoPhotoView` has", "y: float) -> Tuple[float, float]: \"\"\" Transforms from pixels (in", "as np import rasterio.warp from opensfm import features from .orthophoto_manager", "gcp_to_pixel_coordinates(self, x: float, y: float) -> Tuple[float, float]: \"\"\" Transforms", "self.image_window.row_off # pyre-fixme[7]: Expected `Tuple[float, float]` but got `List[typing.Any]`. return", "`Tuple[float, float]` but got `List[typing.Any]`. return [x, y] def pixel_to_gcp_coordinates(self,", "`OrthoPhotoView` has no attribute `image_window`. x = px[0] - self.image_window.col_off", "no attribute `geot`. x, y = self.geot.xy(y, x) # And", "to whatever crs the image is in # pyre-fixme[16]: `OrthoPhotoView`", "- self.image_window.row_off # pyre-fixme[7]: Expected `Tuple[float, float]` but got `List[typing.Any]`.", "typing import Tuple import numpy as np import rasterio.warp from", "lat, lon): self.center_lat = lat self.center_lon = lon self.populate_image_list() if", "has no attribute `image_window`. x = px[0] - self.image_window.col_off y", "OrthoPhotoManager(path, 100.0) self.images_in_list = self.image_manager.image_keys self.zoom_window_size_px = 500 self.is_geo_reference =", "\"\"\" Transforms from normalized coordinates (in the whole geotiff) to", "self.image_window.row_off h, w = self.image_manager.get_image_size(self.current_image) coords = features.normalized_image_coordinates(np.array([[x, y]]), w,", "else: self.bring_new_image(self.current_image) self.set_title() def bring_new_image(self, new_image): super(OrthoPhotoView, self).bring_new_image(new_image, force=True) xlim", "float, y: float): \"\"\" From pixels (in the viewing window)", "new_image): super(OrthoPhotoView, self).bring_new_image(new_image, force=True) xlim = self.ax.get_xlim() ylim = self.ax.get_ylim()", "\"EPSG:4326\", [x], [y]) return lats[0], lons[0] def gcp_to_pixel_coordinates(self, x: float,", "h, w = self.image_manager.get_image_size(self.current_image) px = features.denormalized_image_coordinates(np.array([[x, y]]), w, h)[0]", "float]` but got `List[typing.Any]`. 
return [x, y] def pixel_to_gcp_coordinates(self, x:", "y]]), w, h)[0] return coords.tolist() def refocus(self, lat, lon): self.center_lat", "self.image_manager.get_image_size(self.current_image) px = features.denormalized_image_coordinates(np.array([[x, y]]), w, h)[0] # pyre-fixme[16]: `OrthoPhotoView`", "self.size = 50 # TODO add widget for zoom level", "def set_title(self): lat, lon = self.center_lat, self.center_lon if self.images_in_list: t", "False, ): \"\"\"[summary] Args: main_ui (GUI.Gui) path (str): path containing", "= f\"No orthophotos around {lat}, {lon}\" self.current_image = None self.ax.clear()", "y = px[1] - self.image_window.row_off # pyre-fixme[7]: Expected `Tuple[float, float]`", "pixel_to_latlon(self, x: float, y: float): \"\"\" From pixels (in the", "import OrthoPhotoManager from .view import View class OrthoPhotoView(View): def __init__(", "np import rasterio.warp from opensfm import features from .orthophoto_manager import", "lon): self.center_lat = lat self.center_lon = lon self.populate_image_list() if self.images_in_list:", "import rasterio.warp from opensfm import features from .orthophoto_manager import OrthoPhotoManager", "for zoom level super(OrthoPhotoView, self).__init__(main_ui, False) self.refocus(init_lat, init_lon) self.populate_image_list() if", "containing geotiffs \"\"\" self.image_manager = OrthoPhotoManager(path, 100.0) self.images_in_list = self.image_manager.image_keys", "self.center_lon, self.size ) def pixel_to_latlon(self, x: float, y: float): \"\"\"", "(lat/lon) lons, lats = rasterio.warp.transform(self.geot.crs, \"EPSG:4326\", [x], [y]) return lats[0],", "lats[0], lons[0] def gcp_to_pixel_coordinates(self, x: float, y: float) -> Tuple[float,", "self.refocus(init_lat, init_lon) self.populate_image_list() if self.images_in_list: self.bring_new_image(self.images_in_list[0]) self.set_title() def get_image(self, new_image):", "new_image): crop, image_window, geot = self.image_manager.read_image_around_latlon( new_image, self.center_lat, self.center_lon, self.size", "100.0) self.images_in_list = self.image_manager.image_keys self.zoom_window_size_px = 500 self.is_geo_reference = is_geo_reference", "= self.image_manager.read_image_around_latlon( new_image, self.center_lat, self.center_lon, self.size ) self.image_window = image_window", "w, h)[0] # pyre-fixme[16]: `OrthoPhotoView` has no attribute `image_window`. 
x", "y: float) -> Tuple[float, float]: \"\"\" Transforms from normalized coordinates", "main_ui (GUI.Gui) path (str): path containing geotiffs \"\"\" self.image_manager =", "shot = self.current_image seq_ix = self.images_in_list.index(shot) title = f\"{t} [{seq_ix+1}/{len(self.images_in_list)}]:", "pixel_to_gcp_coordinates(self, x: float, y: float) -> Tuple[float, float]: \"\"\" Transforms", "Transforms from pixels (in the viewing window) to normalized coordinates", "float, is_geo_reference: bool = False, ): \"\"\"[summary] Args: main_ui (GUI.Gui)", "lon = self.center_lat, self.center_lon if self.images_in_list: t = \"Images covering", "self.bring_new_image(self.images_in_list[0]) self.set_title() def get_image(self, new_image): crop, image_window, geot = self.image_manager.read_image_around_latlon(", "rasterio.warp from opensfm import features from .orthophoto_manager import OrthoPhotoManager from", "self.images_in_list = self.image_manager.image_keys self.zoom_window_size_px = 500 self.is_geo_reference = is_geo_reference self.size", ".orthophoto_manager import OrthoPhotoManager from .view import View class OrthoPhotoView(View): def", "bring_new_image(self, new_image): super(OrthoPhotoView, self).bring_new_image(new_image, force=True) xlim = self.ax.get_xlim() ylim =", "View class OrthoPhotoView(View): def __init__( self, main_ui, path: str, init_lat:", "self.center_lat, self.center_lon, self.size ) def pixel_to_latlon(self, x: float, y: float):" ]
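# The two GCP transforms above compose a crop-window offset with OpenSfM-style
# normalized image coordinates. A minimal, self-contained sketch of that round
# trip, assuming the usual OpenSfM convention (pixel coordinates centered on
# the image and scaled by max(width, height)); `col_off`/`row_off` stand in for
# the rasterio window offsets used above, and these helper names are
# illustrative, not part of the original module.

import numpy as np

def normalized_to_window_pixel(nx, ny, w, h, col_off, row_off):
    # Denormalize into full-geotiff pixels, then shift into the crop window.
    size = max(w, h)
    px = nx * size + (w - 1) / 2.0
    py = ny * size + (h - 1) / 2.0
    return px - col_off, py - row_off

def window_pixel_to_normalized(x, y, w, h, col_off, row_off):
    # Shift back into full-geotiff pixels, then normalize.
    size = max(w, h)
    px, py = x + col_off, y + row_off
    return (px - (w - 1) / 2.0) / size, (py - (h - 1) / 2.0) / size

# Round trip: normalized -> window pixel -> normalized recovers the input.
x, y = normalized_to_window_pixel(0.1, -0.2, 4000, 3000, 512, 256)
nx, ny = window_pixel_to_normalized(x, y, 4000, 3000, 512, 256)
assert np.allclose([nx, ny], [0.1, -0.2])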
[ "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "distributed on an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR", "may obtain # a copy of the License at #", "# # Licensed under the Apache License, Version 2.0 (the", "agreed to in writing, software # distributed under the License", "import security_group_default_rules_client from tempest.tests.lib import fake_auth_provider from tempest.tests.lib.services import base", "bytes_body=False): self.check_service_client_function( self.client.list_security_group_default_rules, 'tempest.lib.common.rest_client.RestClient.get', {\"security_group_default_rules\": [self.FAKE_RULE]}, to_utf=bytes_body) def test_list_security_group_default_rules_with_str_body(self): self._test_list_security_group_default_rules()", "Unless required by applicable law or agreed to in writing,", "test_create_security_default_group_rule_with_bytes_body(self): self._test_create_security_default_group_rule(bytes_body=True) def test_delete_security_group_default_rule(self): self.check_service_client_function( self.client.delete_security_group_default_rule, 'tempest.lib.common.rest_client.RestClient.delete', {}, status=204, security_group_default_rule_id=1)", "distributed under the License is distributed on an \"AS IS\"", "setUp(self): super(TestSecurityGroupDefaultRulesClient, self).setUp() fake_auth = fake_auth_provider.FakeAuthProvider() self.client = (security_group_default_rules_client. SecurityGroupDefaultRulesClient(fake_auth,", "\"id\": 1, \"ip_protocol\": \"TCP\", \"ip_range\": { \"cidr\": \"10.10.10.0/24\" }, \"to_port\":", "fake_auth_provider from tempest.tests.lib.services import base class TestSecurityGroupDefaultRulesClient(base.BaseServiceTest): FAKE_RULE = {", "self.check_service_client_function( self.client.list_security_group_default_rules, 'tempest.lib.common.rest_client.RestClient.get', {\"security_group_default_rules\": [self.FAKE_RULE]}, to_utf=bytes_body) def test_list_security_group_default_rules_with_str_body(self): self._test_list_security_group_default_rules() def", "} self.check_service_client_function( self.client.create_security_default_group_rule, 'tempest.lib.common.rest_client.RestClient.post', {\"security_group_default_rule\": self.FAKE_RULE}, to_utf=bytes_body, **request_body) def test_create_security_default_group_rule_with_str_body(self):", "\"10.10.10.0/24\" }, \"to_port\": 80 } def setUp(self): super(TestSecurityGroupDefaultRulesClient, self).setUp() fake_auth", "self.check_service_client_function( self.client.create_security_default_group_rule, 'tempest.lib.common.rest_client.RestClient.post', {\"security_group_default_rule\": self.FAKE_RULE}, to_utf=bytes_body, **request_body) def test_create_security_default_group_rule_with_str_body(self): self._test_create_security_default_group_rule()", "License, Version 2.0 (the \"License\"); you may # not use", "CONDITIONS OF ANY KIND, either express or implied. 
See the", "{\"security_group_default_rule\": self.FAKE_RULE}, to_utf=bytes_body, security_group_default_rule_id=1) def test_show_security_group_default_rule_with_str_body(self): self._test_show_security_group_default_rule() def test_show_security_group_default_rule_with_bytes_body(self): self._test_show_security_group_default_rule(bytes_body=True)", "to_utf=bytes_body, security_group_default_rule_id=1) def test_show_security_group_default_rule_with_str_body(self): self._test_show_security_group_default_rule() def test_show_security_group_default_rule_with_bytes_body(self): self._test_show_security_group_default_rule(bytes_body=True) def _test_create_security_default_group_rule(self,", "obtain # a copy of the License at # #", "applicable law or agreed to in writing, software # distributed", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "Version 2.0 (the \"License\"); you may # not use this", "specific language governing permissions and limitations # under the License.", "# not use this file except in compliance with the", "not use this file except in compliance with the License.", "rights reserved. # # Licensed under the Apache License, Version", "OF ANY KIND, either express or implied. See the #", "self).setUp() fake_auth = fake_auth_provider.FakeAuthProvider() self.client = (security_group_default_rules_client. SecurityGroupDefaultRulesClient(fake_auth, 'compute', 'regionOne'))", "fake_auth_provider.FakeAuthProvider() self.client = (security_group_default_rules_client. SecurityGroupDefaultRulesClient(fake_auth, 'compute', 'regionOne')) def _test_list_security_group_default_rules(self, bytes_body=False):", "fake_auth = fake_auth_provider.FakeAuthProvider() self.client = (security_group_default_rules_client. SecurityGroupDefaultRulesClient(fake_auth, 'compute', 'regionOne')) def", "= (security_group_default_rules_client. SecurityGroupDefaultRulesClient(fake_auth, 'compute', 'regionOne')) def _test_list_security_group_default_rules(self, bytes_body=False): self.check_service_client_function( self.client.list_security_group_default_rules,", "self.client.list_security_group_default_rules, 'tempest.lib.common.rest_client.RestClient.get', {\"security_group_default_rules\": [self.FAKE_RULE]}, to_utf=bytes_body) def test_list_security_group_default_rules_with_str_body(self): self._test_list_security_group_default_rules() def test_list_security_group_default_rules_with_bytes_body(self):", "writing, software # distributed under the License is distributed on", "} def setUp(self): super(TestSecurityGroupDefaultRulesClient, self).setUp() fake_auth = fake_auth_provider.FakeAuthProvider() self.client =", "WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express", "in writing, software # distributed under the License is distributed", "self.check_service_client_function( self.client.show_security_group_default_rule, 'tempest.lib.common.rest_client.RestClient.get', {\"security_group_default_rule\": self.FAKE_RULE}, to_utf=bytes_body, security_group_default_rule_id=1) def test_show_security_group_default_rule_with_str_body(self): self._test_show_security_group_default_rule()", "in compliance with the License. 
You may obtain # a", "# Licensed under the Apache License, Version 2.0 (the \"License\");", "License for the specific language governing permissions and limitations #", "{\"security_group_default_rules\": [self.FAKE_RULE]}, to_utf=bytes_body) def test_list_security_group_default_rules_with_str_body(self): self._test_list_security_group_default_rules() def test_list_security_group_default_rules_with_bytes_body(self): self._test_list_security_group_default_rules(bytes_body=True) def", "self._test_list_security_group_default_rules(bytes_body=True) def _test_show_security_group_default_rule(self, bytes_body=False): self.check_service_client_function( self.client.show_security_group_default_rule, 'tempest.lib.common.rest_client.RestClient.get', {\"security_group_default_rule\": self.FAKE_RULE}, to_utf=bytes_body,", "# under the License. from tempest.lib.services.compute import security_group_default_rules_client from tempest.tests.lib", "1, \"ip_protocol\": \"TCP\", \"ip_range\": { \"cidr\": \"10.10.10.0/24\" }, \"to_port\": 80", "'tempest.lib.common.rest_client.RestClient.post', {\"security_group_default_rule\": self.FAKE_RULE}, to_utf=bytes_body, **request_body) def test_create_security_default_group_rule_with_str_body(self): self._test_create_security_default_group_rule() def test_create_security_default_group_rule_with_bytes_body(self):", "the License. You may obtain # a copy of the", "governing permissions and limitations # under the License. from tempest.lib.services.compute", "bytes_body=False): self.check_service_client_function( self.client.show_security_group_default_rule, 'tempest.lib.common.rest_client.RestClient.get', {\"security_group_default_rule\": self.FAKE_RULE}, to_utf=bytes_body, security_group_default_rule_id=1) def test_show_security_group_default_rule_with_str_body(self):", "an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF", "on an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS", "use this file except in compliance with the License. You", "2015 NEC Corporation. All rights reserved. 
# # Licensed under", "You may obtain # a copy of the License at", "{ \"to_port\": 80, \"from_port\": 80, \"ip_protocol\": \"TCP\", \"cidr\": \"10.10.10.0/24\" }", "test_create_security_default_group_rule_with_str_body(self): self._test_create_security_default_group_rule() def test_create_security_default_group_rule_with_bytes_body(self): self._test_create_security_default_group_rule(bytes_body=True) def test_delete_security_group_default_rule(self): self.check_service_client_function( self.client.delete_security_group_default_rule, 'tempest.lib.common.rest_client.RestClient.delete',", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "self._test_create_security_default_group_rule() def test_create_security_default_group_rule_with_bytes_body(self): self._test_create_security_default_group_rule(bytes_body=True) def test_delete_security_group_default_rule(self): self.check_service_client_function( self.client.delete_security_group_default_rule, 'tempest.lib.common.rest_client.RestClient.delete', {},", "\"ip_protocol\": \"TCP\", \"ip_range\": { \"cidr\": \"10.10.10.0/24\" }, \"to_port\": 80 }", "def test_list_security_group_default_rules_with_str_body(self): self._test_list_security_group_default_rules() def test_list_security_group_default_rules_with_bytes_body(self): self._test_list_security_group_default_rules(bytes_body=True) def _test_show_security_group_default_rule(self, bytes_body=False): self.check_service_client_function(", "**request_body) def test_create_security_default_group_rule_with_str_body(self): self._test_create_security_default_group_rule() def test_create_security_default_group_rule_with_bytes_body(self): self._test_create_security_default_group_rule(bytes_body=True) def test_delete_security_group_default_rule(self): self.check_service_client_function(", "to_utf=bytes_body, **request_body) def test_create_security_default_group_rule_with_str_body(self): self._test_create_security_default_group_rule() def test_create_security_default_group_rule_with_bytes_body(self): self._test_create_security_default_group_rule(bytes_body=True) def test_delete_security_group_default_rule(self):", "reserved. # # Licensed under the Apache License, Version 2.0", "bytes_body=False): request_body = { \"to_port\": 80, \"from_port\": 80, \"ip_protocol\": \"TCP\",", "SecurityGroupDefaultRulesClient(fake_auth, 'compute', 'regionOne')) def _test_list_security_group_default_rules(self, bytes_body=False): self.check_service_client_function( self.client.list_security_group_default_rules, 'tempest.lib.common.rest_client.RestClient.get', {\"security_group_default_rules\":", "80, \"from_port\": 80, \"ip_protocol\": \"TCP\", \"cidr\": \"10.10.10.0/24\" } self.check_service_client_function( self.client.create_security_default_group_rule,", "def _test_list_security_group_default_rules(self, bytes_body=False): self.check_service_client_function( self.client.list_security_group_default_rules, 'tempest.lib.common.rest_client.RestClient.get', {\"security_group_default_rules\": [self.FAKE_RULE]}, to_utf=bytes_body) def", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "FAKE_RULE = { \"from_port\": 80, \"id\": 1, \"ip_protocol\": \"TCP\", \"ip_range\":", "base class TestSecurityGroupDefaultRulesClient(base.BaseServiceTest): FAKE_RULE = { \"from_port\": 80, \"id\": 1,", "= { \"from_port\": 80, \"id\": 1, \"ip_protocol\": \"TCP\", \"ip_range\": {", "either express or implied. See the # License for the", "Corporation. 
All rights reserved. # # Licensed under the Apache", "request_body = { \"to_port\": 80, \"from_port\": 80, \"ip_protocol\": \"TCP\", \"cidr\":", "under the License is distributed on an \"AS IS\" BASIS,", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "80, \"id\": 1, \"ip_protocol\": \"TCP\", \"ip_range\": { \"cidr\": \"10.10.10.0/24\" },", "Licensed under the Apache License, Version 2.0 (the \"License\"); you", "_test_list_security_group_default_rules(self, bytes_body=False): self.check_service_client_function( self.client.list_security_group_default_rules, 'tempest.lib.common.rest_client.RestClient.get', {\"security_group_default_rules\": [self.FAKE_RULE]}, to_utf=bytes_body) def test_list_security_group_default_rules_with_str_body(self):", "}, \"to_port\": 80 } def setUp(self): super(TestSecurityGroupDefaultRulesClient, self).setUp() fake_auth =", "may # not use this file except in compliance with", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "class TestSecurityGroupDefaultRulesClient(base.BaseServiceTest): FAKE_RULE = { \"from_port\": 80, \"id\": 1, \"ip_protocol\":", "tempest.tests.lib import fake_auth_provider from tempest.tests.lib.services import base class TestSecurityGroupDefaultRulesClient(base.BaseServiceTest): FAKE_RULE", "License is distributed on an \"AS IS\" BASIS, WITHOUT #", "with the License. You may obtain # a copy of", "KIND, either express or implied. See the # License for", "# License for the specific language governing permissions and limitations", "80, \"ip_protocol\": \"TCP\", \"cidr\": \"10.10.10.0/24\" } self.check_service_client_function( self.client.create_security_default_group_rule, 'tempest.lib.common.rest_client.RestClient.post', {\"security_group_default_rule\":", "you may # not use this file except in compliance", "def test_create_security_default_group_rule_with_bytes_body(self): self._test_create_security_default_group_rule(bytes_body=True) def test_delete_security_group_default_rule(self): self.check_service_client_function( self.client.delete_security_group_default_rule, 'tempest.lib.common.rest_client.RestClient.delete', {}, status=204,", "\"License\"); you may # not use this file except in", "'regionOne')) def _test_list_security_group_default_rules(self, bytes_body=False): self.check_service_client_function( self.client.list_security_group_default_rules, 'tempest.lib.common.rest_client.RestClient.get', {\"security_group_default_rules\": [self.FAKE_RULE]}, to_utf=bytes_body)", "IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND,", "\"TCP\", \"ip_range\": { \"cidr\": \"10.10.10.0/24\" }, \"to_port\": 80 } def", "express or implied. See the # License for the specific", "Copyright 2015 NEC Corporation. All rights reserved. # # Licensed", "this file except in compliance with the License. You may", "language governing permissions and limitations # under the License. from", "the License. 
from tempest.lib.services.compute import security_group_default_rules_client from tempest.tests.lib import fake_auth_provider", "test_list_security_group_default_rules_with_str_body(self): self._test_list_security_group_default_rules() def test_list_security_group_default_rules_with_bytes_body(self): self._test_list_security_group_default_rules(bytes_body=True) def _test_show_security_group_default_rule(self, bytes_body=False): self.check_service_client_function( self.client.show_security_group_default_rule,", "self.FAKE_RULE}, to_utf=bytes_body, security_group_default_rule_id=1) def test_show_security_group_default_rule_with_str_body(self): self._test_show_security_group_default_rule() def test_show_security_group_default_rule_with_bytes_body(self): self._test_show_security_group_default_rule(bytes_body=True) def", "compliance with the License. You may obtain # a copy", "the Apache License, Version 2.0 (the \"License\"); you may #", "self._test_show_security_group_default_rule(bytes_body=True) def _test_create_security_default_group_rule(self, bytes_body=False): request_body = { \"to_port\": 80, \"from_port\":", "def setUp(self): super(TestSecurityGroupDefaultRulesClient, self).setUp() fake_auth = fake_auth_provider.FakeAuthProvider() self.client = (security_group_default_rules_client.", "\"to_port\": 80, \"from_port\": 80, \"ip_protocol\": \"TCP\", \"cidr\": \"10.10.10.0/24\" } self.check_service_client_function(", "under the License. from tempest.lib.services.compute import security_group_default_rules_client from tempest.tests.lib import", "\"to_port\": 80 } def setUp(self): super(TestSecurityGroupDefaultRulesClient, self).setUp() fake_auth = fake_auth_provider.FakeAuthProvider()", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "{ \"cidr\": \"10.10.10.0/24\" }, \"to_port\": 80 } def setUp(self): super(TestSecurityGroupDefaultRulesClient,", "test_show_security_group_default_rule_with_bytes_body(self): self._test_show_security_group_default_rule(bytes_body=True) def _test_create_security_default_group_rule(self, bytes_body=False): request_body = { \"to_port\": 80,", "self.FAKE_RULE}, to_utf=bytes_body, **request_body) def test_create_security_default_group_rule_with_str_body(self): self._test_create_security_default_group_rule() def test_create_security_default_group_rule_with_bytes_body(self): self._test_create_security_default_group_rule(bytes_body=True) def", "to_utf=bytes_body) def test_list_security_group_default_rules_with_str_body(self): self._test_list_security_group_default_rules() def test_list_security_group_default_rules_with_bytes_body(self): self._test_list_security_group_default_rules(bytes_body=True) def _test_show_security_group_default_rule(self, bytes_body=False):", "(security_group_default_rules_client. 
SecurityGroupDefaultRulesClient(fake_auth, 'compute', 'regionOne')) def _test_list_security_group_default_rules(self, bytes_body=False): self.check_service_client_function( self.client.list_security_group_default_rules, 'tempest.lib.common.rest_client.RestClient.get',", "self.client.show_security_group_default_rule, 'tempest.lib.common.rest_client.RestClient.get', {\"security_group_default_rule\": self.FAKE_RULE}, to_utf=bytes_body, security_group_default_rule_id=1) def test_show_security_group_default_rule_with_str_body(self): self._test_show_security_group_default_rule() def", "# WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "See the # License for the specific language governing permissions", "test_list_security_group_default_rules_with_bytes_body(self): self._test_list_security_group_default_rules(bytes_body=True) def _test_show_security_group_default_rule(self, bytes_body=False): self.check_service_client_function( self.client.show_security_group_default_rule, 'tempest.lib.common.rest_client.RestClient.get', {\"security_group_default_rule\": self.FAKE_RULE},", "<reponame>mail2nsrajesh/tempest<filename>tempest/tests/lib/services/compute/test_security_group_default_rules_client.py # Copyright 2015 NEC Corporation. All rights reserved. #", "(the \"License\"); you may # not use this file except", "software # distributed under the License is distributed on an", "_test_show_security_group_default_rule(self, bytes_body=False): self.check_service_client_function( self.client.show_security_group_default_rule, 'tempest.lib.common.rest_client.RestClient.get', {\"security_group_default_rule\": self.FAKE_RULE}, to_utf=bytes_body, security_group_default_rule_id=1) def", "def test_create_security_default_group_rule_with_str_body(self): self._test_create_security_default_group_rule() def test_create_security_default_group_rule_with_bytes_body(self): self._test_create_security_default_group_rule(bytes_body=True) def test_delete_security_group_default_rule(self): self.check_service_client_function( self.client.delete_security_group_default_rule,", "tempest.tests.lib.services import base class TestSecurityGroupDefaultRulesClient(base.BaseServiceTest): FAKE_RULE = { \"from_port\": 80,", "the License is distributed on an \"AS IS\" BASIS, WITHOUT", "the # License for the specific language governing permissions and", "test_show_security_group_default_rule_with_str_body(self): self._test_show_security_group_default_rule() def test_show_security_group_default_rule_with_bytes_body(self): self._test_show_security_group_default_rule(bytes_body=True) def _test_create_security_default_group_rule(self, bytes_body=False): request_body =", "\"ip_protocol\": \"TCP\", \"cidr\": \"10.10.10.0/24\" } self.check_service_client_function( self.client.create_security_default_group_rule, 'tempest.lib.common.rest_client.RestClient.post', {\"security_group_default_rule\": self.FAKE_RULE},", "# a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "# # Unless required by applicable law or agreed to", "super(TestSecurityGroupDefaultRulesClient, self).setUp() fake_auth = fake_auth_provider.FakeAuthProvider() self.client = (security_group_default_rules_client. SecurityGroupDefaultRulesClient(fake_auth, 'compute',", "and limitations # under the License. 
from tempest.lib.services.compute import security_group_default_rules_client", "80 } def setUp(self): super(TestSecurityGroupDefaultRulesClient, self).setUp() fake_auth = fake_auth_provider.FakeAuthProvider() self.client", "security_group_default_rules_client from tempest.tests.lib import fake_auth_provider from tempest.tests.lib.services import base class", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "{ \"from_port\": 80, \"id\": 1, \"ip_protocol\": \"TCP\", \"ip_range\": { \"cidr\":", "file except in compliance with the License. You may obtain", "'compute', 'regionOne')) def _test_list_security_group_default_rules(self, bytes_body=False): self.check_service_client_function( self.client.list_security_group_default_rules, 'tempest.lib.common.rest_client.RestClient.get', {\"security_group_default_rules\": [self.FAKE_RULE]},", "def test_show_security_group_default_rule_with_str_body(self): self._test_show_security_group_default_rule() def test_show_security_group_default_rule_with_bytes_body(self): self._test_show_security_group_default_rule(bytes_body=True) def _test_create_security_default_group_rule(self, bytes_body=False): request_body", "for the specific language governing permissions and limitations # under", "def test_show_security_group_default_rule_with_bytes_body(self): self._test_show_security_group_default_rule(bytes_body=True) def _test_create_security_default_group_rule(self, bytes_body=False): request_body = { \"to_port\":", "law or agreed to in writing, software # distributed under", "self._test_list_security_group_default_rules() def test_list_security_group_default_rules_with_bytes_body(self): self._test_list_security_group_default_rules(bytes_body=True) def _test_show_security_group_default_rule(self, bytes_body=False): self.check_service_client_function( self.client.show_security_group_default_rule, 'tempest.lib.common.rest_client.RestClient.get',", "OR CONDITIONS OF ANY KIND, either express or implied. See", "the specific language governing permissions and limitations # under the", "# Copyright 2015 NEC Corporation. All rights reserved. # #", "from tempest.tests.lib import fake_auth_provider from tempest.tests.lib.services import base class TestSecurityGroupDefaultRulesClient(base.BaseServiceTest):", "security_group_default_rule_id=1) def test_show_security_group_default_rule_with_str_body(self): self._test_show_security_group_default_rule() def test_show_security_group_default_rule_with_bytes_body(self): self._test_show_security_group_default_rule(bytes_body=True) def _test_create_security_default_group_rule(self, bytes_body=False):", "from tempest.lib.services.compute import security_group_default_rules_client from tempest.tests.lib import fake_auth_provider from tempest.tests.lib.services", "[self.FAKE_RULE]}, to_utf=bytes_body) def test_list_security_group_default_rules_with_str_body(self): self._test_list_security_group_default_rules() def test_list_security_group_default_rules_with_bytes_body(self): self._test_list_security_group_default_rules(bytes_body=True) def _test_show_security_group_default_rule(self,", "under the Apache License, Version 2.0 (the \"License\"); you may", "\"cidr\": \"10.10.10.0/24\" }, \"to_port\": 80 } def setUp(self): super(TestSecurityGroupDefaultRulesClient, self).setUp()", "except in compliance with the License. 
You may obtain #", "2.0 (the \"License\"); you may # not use this file", "import fake_auth_provider from tempest.tests.lib.services import base class TestSecurityGroupDefaultRulesClient(base.BaseServiceTest): FAKE_RULE =", "implied. See the # License for the specific language governing", "from tempest.tests.lib.services import base class TestSecurityGroupDefaultRulesClient(base.BaseServiceTest): FAKE_RULE = { \"from_port\":", "self.client = (security_group_default_rules_client. SecurityGroupDefaultRulesClient(fake_auth, 'compute', 'regionOne')) def _test_list_security_group_default_rules(self, bytes_body=False): self.check_service_client_function(", "\"from_port\": 80, \"ip_protocol\": \"TCP\", \"cidr\": \"10.10.10.0/24\" } self.check_service_client_function( self.client.create_security_default_group_rule, 'tempest.lib.common.rest_client.RestClient.post',", "License. You may obtain # a copy of the License", "import base class TestSecurityGroupDefaultRulesClient(base.BaseServiceTest): FAKE_RULE = { \"from_port\": 80, \"id\":", "All rights reserved. # # Licensed under the Apache License,", "by applicable law or agreed to in writing, software #", "# distributed under the License is distributed on an \"AS", "ANY KIND, either express or implied. See the # License", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "\"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY", "\"cidr\": \"10.10.10.0/24\" } self.check_service_client_function( self.client.create_security_default_group_rule, 'tempest.lib.common.rest_client.RestClient.post', {\"security_group_default_rule\": self.FAKE_RULE}, to_utf=bytes_body, **request_body)", "\"10.10.10.0/24\" } self.check_service_client_function( self.client.create_security_default_group_rule, 'tempest.lib.common.rest_client.RestClient.post', {\"security_group_default_rule\": self.FAKE_RULE}, to_utf=bytes_body, **request_body) def", "'tempest.lib.common.rest_client.RestClient.get', {\"security_group_default_rule\": self.FAKE_RULE}, to_utf=bytes_body, security_group_default_rule_id=1) def test_show_security_group_default_rule_with_str_body(self): self._test_show_security_group_default_rule() def test_show_security_group_default_rule_with_bytes_body(self):", "# Unless required by applicable law or agreed to in", "permissions and limitations # under the License. from tempest.lib.services.compute import", "limitations # under the License. 
from tempest.lib.services.compute import security_group_default_rules_client from", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "def test_list_security_group_default_rules_with_bytes_body(self): self._test_list_security_group_default_rules(bytes_body=True) def _test_show_security_group_default_rule(self, bytes_body=False): self.check_service_client_function( self.client.show_security_group_default_rule, 'tempest.lib.common.rest_client.RestClient.get', {\"security_group_default_rule\":", "_test_create_security_default_group_rule(self, bytes_body=False): request_body = { \"to_port\": 80, \"from_port\": 80, \"ip_protocol\":", "\"TCP\", \"cidr\": \"10.10.10.0/24\" } self.check_service_client_function( self.client.create_security_default_group_rule, 'tempest.lib.common.rest_client.RestClient.post', {\"security_group_default_rule\": self.FAKE_RULE}, to_utf=bytes_body,", "to in writing, software # distributed under the License is", "is distributed on an \"AS IS\" BASIS, WITHOUT # WARRANTIES", "\"ip_range\": { \"cidr\": \"10.10.10.0/24\" }, \"to_port\": 80 } def setUp(self):", "BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either", "'tempest.lib.common.rest_client.RestClient.get', {\"security_group_default_rules\": [self.FAKE_RULE]}, to_utf=bytes_body) def test_list_security_group_default_rules_with_str_body(self): self._test_list_security_group_default_rules() def test_list_security_group_default_rules_with_bytes_body(self): self._test_list_security_group_default_rules(bytes_body=True)", "tempest.lib.services.compute import security_group_default_rules_client from tempest.tests.lib import fake_auth_provider from tempest.tests.lib.services import", "= { \"to_port\": 80, \"from_port\": 80, \"ip_protocol\": \"TCP\", \"cidr\": \"10.10.10.0/24\"", "TestSecurityGroupDefaultRulesClient(base.BaseServiceTest): FAKE_RULE = { \"from_port\": 80, \"id\": 1, \"ip_protocol\": \"TCP\",", "or agreed to in writing, software # distributed under the", "{\"security_group_default_rule\": self.FAKE_RULE}, to_utf=bytes_body, **request_body) def test_create_security_default_group_rule_with_str_body(self): self._test_create_security_default_group_rule() def test_create_security_default_group_rule_with_bytes_body(self): self._test_create_security_default_group_rule(bytes_body=True)", "required by applicable law or agreed to in writing, software", "\"from_port\": 80, \"id\": 1, \"ip_protocol\": \"TCP\", \"ip_range\": { \"cidr\": \"10.10.10.0/24\"", "def _test_create_security_default_group_rule(self, bytes_body=False): request_body = { \"to_port\": 80, \"from_port\": 80,", "self._test_show_security_group_default_rule() def test_show_security_group_default_rule_with_bytes_body(self): self._test_show_security_group_default_rule(bytes_body=True) def _test_create_security_default_group_rule(self, bytes_body=False): request_body = {", "self.client.create_security_default_group_rule, 'tempest.lib.common.rest_client.RestClient.post', {\"security_group_default_rule\": self.FAKE_RULE}, to_utf=bytes_body, **request_body) def test_create_security_default_group_rule_with_str_body(self): self._test_create_security_default_group_rule() def", "def _test_show_security_group_default_rule(self, bytes_body=False): self.check_service_client_function( self.client.show_security_group_default_rule, 'tempest.lib.common.rest_client.RestClient.get', {\"security_group_default_rule\": self.FAKE_RULE}, to_utf=bytes_body, security_group_default_rule_id=1)", "= fake_auth_provider.FakeAuthProvider() 
self.client = (security_group_default_rules_client. SecurityGroupDefaultRulesClient(fake_auth, 'compute', 'regionOne')) def _test_list_security_group_default_rules(self,", "NEC Corporation. All rights reserved. # # Licensed under the", "License. from tempest.lib.services.compute import security_group_default_rules_client from tempest.tests.lib import fake_auth_provider from", "or implied. See the # License for the specific language", "Apache License, Version 2.0 (the \"License\"); you may # not" ]
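# Each test above delegates to base.BaseServiceTest.check_service_client_function,
# which patches the named transport method (e.g. RestClient.get) to return a
# canned body and asserts the service client parses it into the expected
# structure. A minimal, self-contained sketch of that pattern using only the
# standard library; FakeClient and its method names are illustrative, not
# tempest's implementation.

import json
from unittest import mock

class FakeClient:
    def get(self, url):
        raise NotImplementedError("the real transport would hit the network")

    def list_rules(self):
        # Unpack (response, body) the way tempest service clients do.
        _resp, body = self.get("/os-security-group-default-rules")
        return json.loads(body)["security_group_default_rules"]

client = FakeClient()
canned = {"security_group_default_rules": [{"id": 1}]}
with mock.patch.object(FakeClient, "get",
                       return_value=(mock.Mock(status=200), json.dumps(canned))):
    assert client.list_rules() == [{"id": 1}]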
[ "with open(\"Password.log\", \"a\") as File: File.write(f\"{Password}\\n\") if (len(sys.argv) == 1)", "INFO: {message}\") def STATUS_LOG(message): CurrentTime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') print(f\"{CurrentTime} -", "= datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') print(Fore.YELLOW + Style.BRIGHT + f\"{CurrentTime} - WARNING:", "a Strong Password for the User! def Generate(PassLen): JoinChars =", "Strong Password for the User! def Generate(PassLen): JoinChars = []", "the User! def Generate(PassLen): JoinChars = [] # Create an", "the List of these String Operations, and Join them to", "colorama import init import datetime import string import random import", "PassGen!\") LOG.STATUS_LOG(\"Generating a Random Password for You.\") Password = Generate(random.randint(5,", "sys.argv[1].lower() != \"-o\"): os.system(\"start Password.log\") sys.exit() # Exiting the program", "def INFO_LOG(message): CurrentTime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') print(f\"{CurrentTime} - INFO: {message}\")", "This will Generate a Strong Password for the User! def", "import sys import os # Initilaze File organizer. os.system('title PassGen')", "if (len(sys.argv) == 1) or (len(sys.argv) > 1 and sys.argv[1].lower()", "random passoword. return \"\".join(JoinChars[0:PassLen]) # Code Logic here. LOG.WARN_LOG(\"Initialized PassGen!\")", "for the User! def Generate(PassLen): JoinChars = [] # Create", "def WARN_LOG(message): CurrentTime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') print(Fore.YELLOW + Style.BRIGHT +", "= True) # Create Log Functions. class LOG: def INFO_LOG(message):", "Logic here. LOG.WARN_LOG(\"Initialized PassGen!\") LOG.STATUS_LOG(\"Generating a Random Password for You.\")", "datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') print(f\"{CurrentTime} - STATUS: {message}\") def ERROR_LOG(message): CurrentTime =", "{message}\") def WARN_LOG(message): CurrentTime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') print(Fore.YELLOW + Style.BRIGHT", "print(f\"{CurrentTime} - INFO: {message}\") def STATUS_LOG(message): CurrentTime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')", "List of these String Operations, and Join them to JoinChars", "them to JoinChars List. JoinChars.extend(list(string.ascii_letters)) JoinChars.extend(list(string.digits)) JoinChars.extend(list(string.punctuation)) random.shuffle(JoinChars) # Shuffle", "from colorama import Fore, Style from colorama import init import", "# These imports will be used for this project. from", "True) # Create Log Functions. class LOG: def INFO_LOG(message): CurrentTime", "(len(sys.argv) == 1) or (len(sys.argv) > 1 and sys.argv[1].lower() !=", "will Generate a Strong Password for the User! def Generate(PassLen):", "imports will be used for this project. from colorama import", "= datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') print(f\"{CurrentTime} - INFO: {message}\") def STATUS_LOG(message): CurrentTime", "Create an Empty List. # Split the List of these", "+ Style.BRIGHT + f\"{CurrentTime} - ERROR: {message}\") def WARN_LOG(message): CurrentTime", "Operations, and Join them to JoinChars List. JoinChars.extend(list(string.ascii_letters)) JoinChars.extend(list(string.digits)) JoinChars.extend(list(string.punctuation))", "You.\") Password = Generate(random.randint(5, 17)) LOG.INFO_LOG(f\"Your Password is: {Password}\") with", "# This will Generate a Strong Password for the User!", "import os # Initilaze File organizer. 
os.system('title PassGen') init(autoreset =", "and Join them to JoinChars List. JoinChars.extend(list(string.ascii_letters)) JoinChars.extend(list(string.digits)) JoinChars.extend(list(string.punctuation)) random.shuffle(JoinChars)", "# PassGen # These imports will be used for this", "the List. # Get the random passoword. return \"\".join(JoinChars[0:PassLen]) #", "\"\".join(JoinChars[0:PassLen]) # Code Logic here. LOG.WARN_LOG(\"Initialized PassGen!\") LOG.STATUS_LOG(\"Generating a Random", "os # Initilaze File organizer. os.system('title PassGen') init(autoreset = True)", "Generate(random.randint(5, 17)) LOG.INFO_LOG(f\"Your Password is: {Password}\") with open(\"Password.log\", \"a\") as", "import string import random import sys import os # Initilaze", "- WARNING: {message}\") # This will Generate a Strong Password", "CurrentTime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') print(Fore.YELLOW + Style.BRIGHT + f\"{CurrentTime} -", "the random passoword. return \"\".join(JoinChars[0:PassLen]) # Code Logic here. LOG.WARN_LOG(\"Initialized", "Password for the User! def Generate(PassLen): JoinChars = [] #", "File: File.write(f\"{Password}\\n\") if (len(sys.argv) == 1) or (len(sys.argv) > 1", "datetime import string import random import sys import os #", "STATUS_LOG(message): CurrentTime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') print(f\"{CurrentTime} - STATUS: {message}\") def", "Password is: {Password}\") with open(\"Password.log\", \"a\") as File: File.write(f\"{Password}\\n\") if", "Functions. class LOG: def INFO_LOG(message): CurrentTime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') print(f\"{CurrentTime}", "here. LOG.WARN_LOG(\"Initialized PassGen!\") LOG.STATUS_LOG(\"Generating a Random Password for You.\") Password", "= [] # Create an Empty List. # Split the", "LOG.INFO_LOG(f\"Your Password is: {Password}\") with open(\"Password.log\", \"a\") as File: File.write(f\"{Password}\\n\")", "JoinChars.extend(list(string.punctuation)) random.shuffle(JoinChars) # Shuffle the List. # Get the random", "os.system('title PassGen') init(autoreset = True) # Create Log Functions. class", "Initilaze File organizer. os.system('title PassGen') init(autoreset = True) # Create", "Create Log Functions. class LOG: def INFO_LOG(message): CurrentTime = datetime.datetime.now().strftime('%Y-%m-%d", "project. from colorama import Fore, Style from colorama import init", "%H:%M:%S') print(Fore.RED + Style.BRIGHT + f\"{CurrentTime} - ERROR: {message}\") def", "this project. from colorama import Fore, Style from colorama import", "init(autoreset = True) # Create Log Functions. class LOG: def", "CurrentTime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') print(Fore.RED + Style.BRIGHT + f\"{CurrentTime} -", "- STATUS: {message}\") def ERROR_LOG(message): CurrentTime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') print(Fore.RED", "as File: File.write(f\"{Password}\\n\") if (len(sys.argv) == 1) or (len(sys.argv) >", "(len(sys.argv) > 1 and sys.argv[1].lower() != \"-o\"): os.system(\"start Password.log\") sys.exit()", "open(\"Password.log\", \"a\") as File: File.write(f\"{Password}\\n\") if (len(sys.argv) == 1) or", "Join them to JoinChars List. JoinChars.extend(list(string.ascii_letters)) JoinChars.extend(list(string.digits)) JoinChars.extend(list(string.punctuation)) random.shuffle(JoinChars) #", "Get the random passoword. 
return \"\".join(JoinChars[0:PassLen]) # Code Logic here.", "File.write(f\"{Password}\\n\") if (len(sys.argv) == 1) or (len(sys.argv) > 1 and", "Style.BRIGHT + f\"{CurrentTime} - ERROR: {message}\") def WARN_LOG(message): CurrentTime =", "import Fore, Style from colorama import init import datetime import", "{Password}\") with open(\"Password.log\", \"a\") as File: File.write(f\"{Password}\\n\") if (len(sys.argv) ==", "= datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') print(Fore.RED + Style.BRIGHT + f\"{CurrentTime} - ERROR:", "<filename>main.py # PassGen # These imports will be used for", "# Code Logic here. LOG.WARN_LOG(\"Initialized PassGen!\") LOG.STATUS_LOG(\"Generating a Random Password", "17)) LOG.INFO_LOG(f\"Your Password is: {Password}\") with open(\"Password.log\", \"a\") as File:", "print(Fore.YELLOW + Style.BRIGHT + f\"{CurrentTime} - WARNING: {message}\") # This", "import random import sys import os # Initilaze File organizer.", "class LOG: def INFO_LOG(message): CurrentTime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') print(f\"{CurrentTime} -", "- ERROR: {message}\") def WARN_LOG(message): CurrentTime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') print(Fore.YELLOW", "!= \"-o\"): os.system(\"start Password.log\") sys.exit() # Exiting the program successfully.", "{message}\") def ERROR_LOG(message): CurrentTime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') print(Fore.RED + Style.BRIGHT", "Style from colorama import init import datetime import string import", "random import sys import os # Initilaze File organizer. os.system('title", "ERROR_LOG(message): CurrentTime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') print(Fore.RED + Style.BRIGHT + f\"{CurrentTime}", "import datetime import string import random import sys import os", "organizer. os.system('title PassGen') init(autoreset = True) # Create Log Functions.", "a Random Password for You.\") Password = Generate(random.randint(5, 17)) LOG.INFO_LOG(f\"Your", "# Create an Empty List. # Split the List of", "print(f\"{CurrentTime} - STATUS: {message}\") def ERROR_LOG(message): CurrentTime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')", "LOG: def INFO_LOG(message): CurrentTime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') print(f\"{CurrentTime} - INFO:", "def STATUS_LOG(message): CurrentTime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') print(f\"{CurrentTime} - STATUS: {message}\")", "+ f\"{CurrentTime} - WARNING: {message}\") # This will Generate a", "\"a\") as File: File.write(f\"{Password}\\n\") if (len(sys.argv) == 1) or (len(sys.argv)", "LOG.STATUS_LOG(\"Generating a Random Password for You.\") Password = Generate(random.randint(5, 17))", "# Shuffle the List. # Get the random passoword. return", "datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') print(Fore.RED + Style.BRIGHT + f\"{CurrentTime} - ERROR: {message}\")", "WARNING: {message}\") # This will Generate a Strong Password for", "Style.BRIGHT + f\"{CurrentTime} - WARNING: {message}\") # This will Generate", "Empty List. # Split the List of these String Operations,", "File organizer. os.system('title PassGen') init(autoreset = True) # Create Log", "These imports will be used for this project. from colorama", "used for this project. from colorama import Fore, Style from", "JoinChars.extend(list(string.digits)) JoinChars.extend(list(string.punctuation)) random.shuffle(JoinChars) # Shuffle the List. # Get the", "def Generate(PassLen): JoinChars = [] # Create an Empty List.", "for this project. 
from colorama import Fore, Style from colorama", "Log Functions. class LOG: def INFO_LOG(message): CurrentTime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')", "INFO_LOG(message): CurrentTime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') print(f\"{CurrentTime} - INFO: {message}\") def", "- INFO: {message}\") def STATUS_LOG(message): CurrentTime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') print(f\"{CurrentTime}", "WARN_LOG(message): CurrentTime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') print(Fore.YELLOW + Style.BRIGHT + f\"{CurrentTime}", "Password for You.\") Password = Generate(random.randint(5, 17)) LOG.INFO_LOG(f\"Your Password is:", "Password = Generate(random.randint(5, 17)) LOG.INFO_LOG(f\"Your Password is: {Password}\") with open(\"Password.log\",", "%H:%M:%S') print(f\"{CurrentTime} - INFO: {message}\") def STATUS_LOG(message): CurrentTime = datetime.datetime.now().strftime('%Y-%m-%d", "f\"{CurrentTime} - ERROR: {message}\") def WARN_LOG(message): CurrentTime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')", "List. JoinChars.extend(list(string.ascii_letters)) JoinChars.extend(list(string.digits)) JoinChars.extend(list(string.punctuation)) random.shuffle(JoinChars) # Shuffle the List. #", "f\"{CurrentTime} - WARNING: {message}\") # This will Generate a Strong", "CurrentTime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') print(f\"{CurrentTime} - STATUS: {message}\") def ERROR_LOG(message):", "of these String Operations, and Join them to JoinChars List.", "Split the List of these String Operations, and Join them", "for You.\") Password = Generate(random.randint(5, 17)) LOG.INFO_LOG(f\"Your Password is: {Password}\")", "will be used for this project. from colorama import Fore,", "+ f\"{CurrentTime} - ERROR: {message}\") def WARN_LOG(message): CurrentTime = datetime.datetime.now().strftime('%Y-%m-%d", "String Operations, and Join them to JoinChars List. JoinChars.extend(list(string.ascii_letters)) JoinChars.extend(list(string.digits))", "init import datetime import string import random import sys import", "%H:%M:%S') print(f\"{CurrentTime} - STATUS: {message}\") def ERROR_LOG(message): CurrentTime = datetime.datetime.now().strftime('%Y-%m-%d", "[] # Create an Empty List. # Split the List", "= Generate(random.randint(5, 17)) LOG.INFO_LOG(f\"Your Password is: {Password}\") with open(\"Password.log\", \"a\")", "List. # Split the List of these String Operations, and", "Random Password for You.\") Password = Generate(random.randint(5, 17)) LOG.INFO_LOG(f\"Your Password", "{message}\") def STATUS_LOG(message): CurrentTime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') print(f\"{CurrentTime} - STATUS:", "to JoinChars List. JoinChars.extend(list(string.ascii_letters)) JoinChars.extend(list(string.digits)) JoinChars.extend(list(string.punctuation)) random.shuffle(JoinChars) # Shuffle the", "Generate a Strong Password for the User! def Generate(PassLen): JoinChars", "Code Logic here. LOG.WARN_LOG(\"Initialized PassGen!\") LOG.STATUS_LOG(\"Generating a Random Password for", "be used for this project. from colorama import Fore, Style", "= datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') print(f\"{CurrentTime} - STATUS: {message}\") def ERROR_LOG(message): CurrentTime", "these String Operations, and Join them to JoinChars List. 
JoinChars.extend(list(string.ascii_letters))", "datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') print(f\"{CurrentTime} - INFO: {message}\") def STATUS_LOG(message): CurrentTime =", "random.shuffle(JoinChars) # Shuffle the List. # Get the random passoword.", "sys import os # Initilaze File organizer. os.system('title PassGen') init(autoreset", "PassGen') init(autoreset = True) # Create Log Functions. class LOG:", "string import random import sys import os # Initilaze File", "+ Style.BRIGHT + f\"{CurrentTime} - WARNING: {message}\") # This will", "import init import datetime import string import random import sys", "or (len(sys.argv) > 1 and sys.argv[1].lower() != \"-o\"): os.system(\"start Password.log\")", "datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') print(Fore.YELLOW + Style.BRIGHT + f\"{CurrentTime} - WARNING: {message}\")", "LOG.WARN_LOG(\"Initialized PassGen!\") LOG.STATUS_LOG(\"Generating a Random Password for You.\") Password =", "and sys.argv[1].lower() != \"-o\"): os.system(\"start Password.log\") sys.exit() # Exiting the", "return \"\".join(JoinChars[0:PassLen]) # Code Logic here. LOG.WARN_LOG(\"Initialized PassGen!\") LOG.STATUS_LOG(\"Generating a", "== 1) or (len(sys.argv) > 1 and sys.argv[1].lower() != \"-o\"):", "User! def Generate(PassLen): JoinChars = [] # Create an Empty", "# Split the List of these String Operations, and Join", "> 1 and sys.argv[1].lower() != \"-o\"): os.system(\"start Password.log\") sys.exit() #", "1 and sys.argv[1].lower() != \"-o\"): os.system(\"start Password.log\") sys.exit() # Exiting", "Generate(PassLen): JoinChars = [] # Create an Empty List. #", "def ERROR_LOG(message): CurrentTime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') print(Fore.RED + Style.BRIGHT +", "ERROR: {message}\") def WARN_LOG(message): CurrentTime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') print(Fore.YELLOW +", "{message}\") # This will Generate a Strong Password for the", "PassGen # These imports will be used for this project.", "Fore, Style from colorama import init import datetime import string", "Shuffle the List. # Get the random passoword. return \"\".join(JoinChars[0:PassLen])", "from colorama import init import datetime import string import random", "%H:%M:%S') print(Fore.YELLOW + Style.BRIGHT + f\"{CurrentTime} - WARNING: {message}\") #", "is: {Password}\") with open(\"Password.log\", \"a\") as File: File.write(f\"{Password}\\n\") if (len(sys.argv)", "JoinChars.extend(list(string.ascii_letters)) JoinChars.extend(list(string.digits)) JoinChars.extend(list(string.punctuation)) random.shuffle(JoinChars) # Shuffle the List. # Get", "JoinChars = [] # Create an Empty List. # Split", "passoword. return \"\".join(JoinChars[0:PassLen]) # Code Logic here. LOG.WARN_LOG(\"Initialized PassGen!\") LOG.STATUS_LOG(\"Generating", "# Initilaze File organizer. os.system('title PassGen') init(autoreset = True) #", "JoinChars List. JoinChars.extend(list(string.ascii_letters)) JoinChars.extend(list(string.digits)) JoinChars.extend(list(string.punctuation)) random.shuffle(JoinChars) # Shuffle the List.", "colorama import Fore, Style from colorama import init import datetime", "CurrentTime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') print(f\"{CurrentTime} - INFO: {message}\") def STATUS_LOG(message):", "STATUS: {message}\") def ERROR_LOG(message): CurrentTime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') print(Fore.RED +", "print(Fore.RED + Style.BRIGHT + f\"{CurrentTime} - ERROR: {message}\") def WARN_LOG(message):", "List. 
# Get the random passoword. return \"\".join(JoinChars[0:PassLen]) # Code", "# Get the random passoword. return \"\".join(JoinChars[0:PassLen]) # Code Logic", "1) or (len(sys.argv) > 1 and sys.argv[1].lower() != \"-o\"): os.system(\"start", "an Empty List. # Split the List of these String", "# Create Log Functions. class LOG: def INFO_LOG(message): CurrentTime =" ]
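# Generate() above draws from random, which is fine for casual use but is not
# a cryptographically secure source, and slicing a shuffled pool of unique
# characters means a password can never repeat a character. A variant built on
# the stdlib secrets module avoids both limitations; generate_secure is a
# suggested alternative, not part of the original script.

import secrets
import string

def generate_secure(pass_len):
    alphabet = string.ascii_letters + string.digits + string.punctuation
    # secrets.choice samples with replacement from a CSPRNG.
    return "".join(secrets.choice(alphabet) for _ in range(pass_len))

print(generate_secure(16))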
[ "sig in siglist: sig.signer = User.find(username=sig.signer_id) sig.delegate = User.find(username=sig.delegate_id) return", "none\" if user == None: return None memolist = Memo.query.join(User).filter(Memo.memo_state==MemoState.Draft,User.username==user.username).order_by(Memo.action_date.desc()).paginate(page", "have access to your own memo's if self.user.username == user.username:", "if self.confidential == False: return True # at this point", "memo # revise an existing memo new_memo = Memo(number =", "valid_references(references): current_app.logger.info(f'references ={references}') valid_memos = [] valid_refs = [] invalid", "self.user.is_delegate(delegate): return False if self.memo_state == MemoState.Active: return True return", "== self.number,Memo.version != self.version).all() for memo in prev_list: if memo.memo_state", "anyone can access if self.confidential == False: return True #", "return f\"{self.user.username}-{self.number}{self.version}\" ######################################## # Permission Functions ######################################## @staticmethod def can_create(owner=None,", "import db from memos.models.User import User from memos.models.MemoState import MemoState", "MemoSignature.get_signers(self) for sig in siglist: sig.signer = User.find(username=sig.signer_id) sig.delegate =", "signer=None, delegate=None): \"\"\" can this memo be rejected by the", "self._signers = signer_names MemoSignature.delete_signers(self) users = User.valid_usernames(signer_names) for signer in", "as 'A' def save(self): db.session.add(self) db.session.commit() self.saveJson() ################################################################################ # functions", "= memo.distribution,\\ keywords = memo.keywords,\\ title = memo.title,\\ num_files =", "by the delegate. 
Only memos in signoff can be rejected\"\"\"", "self.get_files(): js['files'].append(file.filename) path = os.path.join(self.get_fullpath()) #current_app.logger.info(f\"Making Directory {path}\") os.makedirs(path,exist_ok=True) #current_app.logger.info(f\"Making", "{new_memo}\") return new_memo if memo.memo_state == MemoState.Draft: current_app.logger.info(f\"Found a draft", "better if user.username in re.split('\\s|\\,|\\t|\\;|\\:',self.distribution): return True return False ########################################", "return True # signer function def reject(self,signer=None,delegate=None): current_app.logger.info(f\"signer = {signer}", "{} js['title']=self.title js['number']=self.number js['version']=self.version js['confidential']=self.confidential js['distribution']=self.distribution js['keywords']=self.keywords js['userid']=self.user_id js['memo_state']=f\"{self.memo_state}\" js['keywords']=", "This function will return None or a new Memo if", "MemoState.Draft,\\ action_date = datetime.utcnow(),\\ create_date = datetime.utcnow(),\\ signers = ''", "rval.append((refstring,memo)) return {'reflist':rval,'ref_string':self._references} @references.setter def references(self,references): self._references = references refs", "current_app.logger.info(f\"Creating new memo {new_memo}\") return new_memo if memo.memo_state == MemoState.Draft:", "== None or memo==None: memo_number = Memo.get_next_number(owner) new_memo = Memo(number", "f.close() @property def signers(self): # get the signers from the", "obsolete_previous(self,acting=None): prev_list = Memo.query.join(User).filter(Memo.number == self.number,Memo.version != self.version).all() for memo", "return {'reflist':rval,'ref_string':self._references} @references.setter def references(self,references): self._references = references refs =", "the relative path of this memo \"\"\" path = os.path.join(\"/static\",\"memos\",f\"{self.user_id}\",f\"{self.number}\",f\"{self.version}\")", "confidential = memo.confidential,\\ distribution = memo.distribution,\\ keywords = memo.keywords,\\ title", "= os.path.join(self.get_fullpath()) #current_app.logger.info(f\"Making Directory {path}\") os.makedirs(path,exist_ok=True) #current_app.logger.info(f\"Making Succeeded {path}\") path", "user!=None memo_list = Memo.query.join(User).filter(User.username==user.username)\\ .order_by(Memo.number.desc()).first() if memo_list == None: return", "path to a file \"\"\" path = os.path.join(current_app.root_path,\"static\",\"memos\",f\"{self.user_id}\",f\"{self.number}\",f\"{self.version}\") return path", "return True ################################################################################ # End of State machine functions ################################################################################", "self.action_date = datetime.utcnow() self.submit_date = None self.active_date = None self.obsolete_date", "path = os.path.join(current_app.root_path,\"static\",\"memos\",f\"{self.user_id}\",f\"{self.number}\",f\"{self.version}\") return path def get_relpath(self): \"\"\" Return the", "Can this memo be obsoleted by the delegate? 
Only active", "self.memo_state == MemoState.Signoff: if MemoSignature.status(self.id): self.memo_state = MemoState.Active self.active_date =", "Memo(db.Model): \"\"\"This class is the single interface to a \"memo\"", "\"username\" has access to self\"\"\" # if it is not", "a new version of an existing memo) if memo_number ==", "return {'signers':self._signers,'siglist':siglist} @signers.setter def signers(self,signer_names): self._signers = signer_names MemoSignature.delete_signers(self) users", "False # The list of signers and if they have", "!= None if owner == None or delegate == None:", "MemoSignature.is_signer(self.id,signer) return status['is_signer'] and not status['status'] def can_unsign(self, signer=None, delegate=None):", "\"\"\" path = os.path.join(current_app.root_path,\"static\",\"memos\",f\"{self.user_id}\",f\"{self.number}\",f\"{self.version}\") return path def get_relpath(self): \"\"\" Return", "unsigned by delegate for the signer \"\"\" if signer is", "True: return None memo = Memo.query.join(User).filter(User.username==owner.username,Memo.number==memo_number).order_by(Memo.version.desc()).first() # create a new", "= [] for ref in refs: userid=ref[0] memo = Memo.find(username=userid,memo_number=ref[1],memo_version=ref[2])", "delegate. Only memos in signoff can be rejected\"\"\" if signer", "has been published\") if self.memo_state == MemoState.Signoff: if MemoSignature.status(self.id): self.memo_state", "has been published\") MemoHistory.activity(memo=self,memo_activity=MemoActivity.Activate,user=acting) self.obsolete_previous(acting=acting) else: current_app.logger.info(f\"Signatures Still Required\") self.action_date", "True of the \"username\" has access to self\"\"\" # if", "moved to obsolete state (from active) user_id = db.Column(db.String(120), db.ForeignKey('user.username'),nullable=False)", "def get_next_number(user=None): assert user!=None memo_list = Memo.query.join(User).filter(User.username==user.username)\\ .order_by(Memo.number.desc()).first() if memo_list", "delegate for the signer \"\"\" if signer is None or", "if not self.user.is_delegate(delegate): return False if self.memo_state == MemoState.Active: return", "MemoSignature.is_signer(memo_id=self.id,signer=signer) # if you are a signer you can reject..", "rval = [] for ref in refs: userid=ref[0] memo =", "Memo.query.join(User).filter(Memo.memo_state==MemoState.Signoff,Memo.id.in_(msigs)).order_by(Memo.action_date.desc()).paginate(page = page,per_page=pagesize) current_app.logger.info(f\"Inbox for {user.username} = Items={len(memolist.items)} {memolist}\") return", "# if you are a signer you can reject.. 
even", "import MemoFile from memos.models.MemoSignature import MemoSignature from memos.models.MemoReference import MemoReference", "os.makedirs(path,exist_ok=True) #current_app.logger.info(f\"Making Succeeded {path}\") path = os.path.join(path,f\"meta-{self.user_id}-{self.number}-{self.version}.json\") f = open(path,\"w\")", "MemoHistory.activity(memo=new_memo,memo_activity=MemoActivity.Create,user=delegate) current_app.logger.info(f\"Creating new memo {new_memo}\") return new_memo if memo.memo_state ==", "\"\"\" This function gives the os path to a file", "# when the memo was moved to obsolete state (from", "title = memo.title,\\ num_files = 0,\\ user_id = memo.user_id,\\ memo_state", "user names on the distribution keywords = db.Column(db.String(128), default='') #", "json from datetime import datetime from flask import current_app from", "return memo_list def saveJson(self): \"\"\" Create the JSON file which", "current_app.logger.info(f\"Found a draft memo {memo}\") return memo # revise an", "self.memo_state == MemoState.Active or self.memo_state == MemoState.Obsolete: return True def", "default='') # any keyword title = db.Column(db.String(128), nullable=False, default='') #", "Functions ######################################## def get_fullpath(self): \"\"\" This function gives the os", "MemoReference from memos.models.MemoHistory import MemoHistory from memos.models.MemoActivity import MemoActivity from", "= datetime.utcnow() self.save() # TODO: ARH def notify_distribution(self,message): current_app.logger.info(F\"Notify Distribution", "this memo be obsoleted by the delegate? Only active memos", "os import shutil import json from datetime import datetime from", "{'signers':self._signers,'siglist':siglist} @signers.setter def signers(self,signer_names): self._signers = signer_names MemoSignature.delete_signers(self) users =", "memo.memo_state == MemoState.Active: memo.memo_state = MemoState.Obsolete MemoHistory.activity(memo=memo,memo_activity=MemoActivity.Obsolete,user=acting) memo.save() # This", "self.obsolete_date = datetime.utcnow() MemoHistory.activity(memo=self,user=delegate,memo_activity=MemoActivity.Obsolete) self.save() return True # Owner Function", "create a memo for the owner\"\"\" if owner is None:", "memo action_date = db.Column(db.DateTime, nullable=False, default=datetime.utcnow) # The last time", "(from active) user_id = db.Column(db.String(120), db.ForeignKey('user.username'),nullable=False) # The key of", "on the distribution keywords = db.Column(db.String(128), default='') # any keyword", "[] valid_refs = [] invalid = [] for memo_ref in", "== '': continue parts = Memo.parse_reference(memo_ref) if len(parts) > 3", "import MemoSignature from memos.models.MemoReference import MemoReference from memos.models.MemoHistory import MemoHistory", "memo_list @staticmethod def search(title=None,keywords=None,page=1,pagesize=None): current_app.logger.info(f\"Search title={title}\") if title != None:", "{user.username} = Items={len(memolist.items)} {memolist}\") return memolist @staticmethod def get_drafts(user=None,page=1,pagesize=None): assert", "memo_id != None: return Memo.query.filter_by(id=memo_id).first() current_app.logger.debug(f\"FIND: Looking for {username}/{memo_number}/{memo_version}\") memoQry", "state (from submitted) obsolete_date = db.Column(db.DateTime) # when the memo", "js['version']=self.version js['confidential']=self.confidential js['distribution']=self.distribution js['keywords']=self.keywords js['userid']=self.user_id 
js['memo_state']=f\"{self.memo_state}\" js['keywords']= self.keywords js['signers']=self.signers['signers'] js['references']=", "= '',\\ keywords = '',\\ title = '',\\ num_files =", "memo.get_next_version(),\\ confidential = memo.confidential,\\ distribution = memo.distribution,\\ keywords = memo.keywords,\\", "= memoQry.first() current_app.logger.debug(f\"Found Memo id={memo}\") return memo @staticmethod def get_memo_list(username=None,memo_number=None,memo_version=None,page=1,pagesize=None):", "signers and if they have signed are kept in the", "= {signer} delegate={delegate}\") if not self.can_sign(signer,delegate): current_app.logger.info(\"NOT!!@ allowed to sign\")", "= datetime.utcnow(),\\ ) new_memo.save() new_memo.references = memo.references['ref_string'] # cannot be", "2: parts.append(None) return parts @staticmethod def valid_references(references): current_app.logger.info(f'references ={references}') valid_memos", "siglist = MemoSignature.get_signers(self) for sig in siglist: sig.signer = User.find(username=sig.signer_id)", "function def reject(self,signer=None,delegate=None): current_app.logger.info(f\"signer = {signer} delegate={delegate}\") if not self.can_reject(signer,delegate):", "this memo be unsigned by delegate for the signer \"\"\"", "= MemoSignature.is_signer(self.id,signer) return status['is_signer'] and status['status'] def can_obsolete(self, delegate=None): \"\"\"", "from datetime import datetime from flask import current_app from memos", "MemoSignature.unsign_all(self) self.save() self.notify_signers(f\"Memo {self.user.username}-{self.number}-{self.version} has been rejected for {signer.username} by", "if user == None: return None msigs = MemoSignature.get_signatures(user,signed=False) memolist", "MemoSignature.status(self.id): self.memo_state = MemoState.Active self.active_date = datetime.utcnow() self.notify_distribution(f\"memo {self.user.username}-{self.number}-{self.version} has", ".paginate(page = page,per_page=pagesize) elif memo_number: memo_list = Memo.query.join(User).filter(User.username==username,Memo.number==memo_number)\\ .order_by(Memo.action_date.desc()).paginate(page =", "\"\"\"Will return true if the delegate can create a memo", "owner\"\"\" if owner is None: return False if delegate is", "in the distribution list then provide access TODO: ARH do", "files attached to the memo action_date = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)", "for memo_ref in re.split(r'\\s|\\,|\\t|\\;|\\:',references): if memo_ref == '': continue parts", "rev_to_b10 class Memo(db.Model): \"\"\"This class is the single interface to", "return None if owner.is_delegate(delegate) != True: return None memo =", "the save new_memo.save() MemoHistory.activity(memo=new_memo,memo_activity=MemoActivity.Create,user=delegate) return new_memo # signer function def", "True # if the username is in the distribution list", "is in the distribution list then provide access TODO: ARH", "def get_next_version(self): memo = Memo.query.join(User).filter(Memo.number == self.number)\\ .order_by(Memo.version.desc()).first() current_app.logger.info(f\"get_next_version {memo.id}", "self.user.username == user.username: return True if user.admin: return True if", "== self.number)\\ .order_by(Memo.version.desc()).first() current_app.logger.info(f\"get_next_version {memo.id} {memo.number} {memo.version}\") if memo: return", "ref[2] == None: refstring=f\"{userid}-{ref[1]}\" else: refstring=f\"{userid}-{ref[1]}-{ref[2]}\" rval.append((refstring,memo)) return 
{'reflist':rval,'ref_string':self._references} @references.setter", "{self} Delegate={delegate}\") if not self.can_obsolete(delegate=delegate): return False self.memo_state = MemoState.Obsolete", "already signed return status['is_signer'] def has_access(self, user=None): \"\"\"This function will", "current_app.logger.info(f\"Memo = {memo}\") if memo != None and (memo.memo_state ==", "= MemoState.Draft self.action_date = datetime.utcnow() self.submit_date = None self.active_date =", "if not self.can_obsolete(delegate=delegate): return False self.memo_state = MemoState.Obsolete self.action_date =", "# References ###################################################################### @staticmethod def parse_reference(reference): parts = re.split(r'-',reference) if", "datetime from flask import current_app from memos import db from", "= MemoState.Obsolete MemoHistory.activity(memo=memo,memo_activity=MemoActivity.Obsolete,user=acting) memo.save() # This function is called when:", "submitted (from created) active_date = db.Column(db.DateTime) # when the memo", "the delegate? Only active memos can be obsoleted \"\"\" if", "is created # 2- a signature happens # 3- an", "this function will return a list of refeference objects +", "until there is an id assigned by the save new_memo.save()", "owner.username,\\ memo_state = MemoState.Draft,\\ action_date = datetime.utcnow(),\\ create_date = datetime.utcnow(),\\", "= db.Column(db.Integer) # Memo Number version = db.Column(db.String) # A,B,..Z,AA,AB,...AZ,BA", "= MemoSignature.get_signatures(user,signed=False) memolist = Memo.query.join(User).filter(Memo.memo_state==MemoState.Signoff,Memo.id.in_(msigs)).order_by(Memo.action_date.desc()).paginate(page = page,per_page=pagesize) current_app.logger.info(f\"Inbox for {user.username}", "== MemoState.Draft: if MemoSignature.status(self.id) == False: self.memo_state = MemoState.Signoff self.submit_date", "# get the signers from the signing table and turn", "for the signer \"\"\" if signer is None or delegate", "return new_memo if memo.memo_state == MemoState.Draft: current_app.logger.info(f\"Found a draft memo", "User.valid_usernames(signer_names) for signer in users['valid_users']: MemoSignature.add_signer(memo=self,signer=signer) ###################################################################### # References ######################################################################", "{'valid_refs':valid_refs, 'valid_memos' : valid_memos,'invalid':invalid} return rval @property def references(self): #", "# The number of files attached to the memo action_date", "# 3- an unsign happens def process_state(self,acting=None): if self.memo_state ==", "@property def references(self): # this function will return a list", "return None memo = Memo.query.join(User).filter(User.username==owner.username,Memo.number==memo_number).order_by(Memo.version.desc()).first() # create a new memo", "{self.user.username}-{self.number}-{self.version} has been published\") if self.memo_state == MemoState.Signoff: if MemoSignature.status(self.id):", "signer, distribution can read distribution = db.Column(db.String(128), default='') # user", "= db.Column(db.String(128), default='') # any keyword title = db.Column(db.String(128), nullable=False,", "js['number']=self.number js['version']=self.version js['confidential']=self.confidential js['distribution']=self.distribution js['keywords']=self.keywords js['userid']=self.user_id js['memo_state']=f\"{self.memo_state}\" js['keywords']= self.keywords js['signers']=self.signers['signers']", 
"default=0) # The number of files attached to the memo", "current_app.logger.info(f\"Signatures Still Required\") self.action_date = datetime.utcnow() self.save() # TODO: ARH", "of an existing memo) if memo_number == None or memo==None:", "new_memo.references = memo.references['ref_string'] # cannot be done until there is", "memo = Memo.query.join(User).filter(Memo.number == self.number)\\ .order_by(Memo.version.desc()).first() current_app.logger.info(f\"get_next_version {memo.id} {memo.number} {memo.version}\")", "none\" if user == None: return None msigs = MemoSignature.get_signatures(user,signed=False)", "by {delegate.username}\") return True ################################################################################ # End of State machine", "[] invalid = [] for memo_ref in re.split(r'\\s|\\,|\\t|\\;|\\:',references): if memo_ref", "of the user who owns the memo _signers = db.Column(db.String(128),default='')", "function def unsign(self,signer=None,delegate=None): if not self.can_unsign(signer,delegate): return False MemoSignature.unsign(self.id,signer,delegate) MemoHistory.activity(memo=self,user=delegate,memo_activity=MemoActivity.Unsign)", "== MemoState.Active or self.memo_state == MemoState.Obsolete: return True def can_sign(self,", "= memo.keywords,\\ title = memo.title,\\ num_files = 0,\\ user_id =", "= memo.confidential,\\ distribution = memo.distribution,\\ keywords = memo.keywords,\\ title =", "== MemoState.Active: memo.memo_state = MemoState.Obsolete MemoHistory.activity(memo=memo,memo_activity=MemoActivity.Obsolete,user=acting) memo.save() # This function", "== MemoState.Obsolete): valid_memos.append(memo) valid_refs.append(memo_ref) else: invalid.append(memo_ref) rval = {'valid_refs':valid_refs, 'valid_memos'", "from the signing table and turn it back to a", "file which is a copy of all of the meta", "num_files = db.Column(db.Integer, default=0) # The number of files attached", "import MemoReference from memos.models.MemoHistory import MemoHistory from memos.models.MemoActivity import MemoActivity", "js['signers']=self.signers['signers'] js['references']= self.references['ref_string'] js['files']=[] for file in self.get_files(): js['files'].append(file.filename) path", "= Memo.find(username=userid,memo_number=ref[1],memo_version=ref[2]) if ref[2] == None: refstring=f\"{userid}-{ref[1]}\" else: refstring=f\"{userid}-{ref[1]}-{ref[2]}\" rval.append((refstring,memo))", "def can_cancel(self, delegate=None): \"\"\" can this memo be cancled by", "return memo_list.number+1 @staticmethod def get_inbox(user=None,page=1,pagesize=None): assert user!=None,\"User must not be", "user=None): \"\"\"This function will return True of the \"username\" has", "= parts[0] memo_number = parts[1] memo_version = parts[2] memo =", "# also known as 'A' def save(self): db.session.add(self) db.session.commit() self.saveJson()", "done until there is an id assigned by the save", "assigned by the save new_memo.signers = memo._signers # cannot be", "by the delegate? 
Only active memos can be obsoleted \"\"\"", "when the memo was most recently submitted (from created) active_date", "current_app.logger.info(f\"INVALID length append {memo_ref} valid={valid_memos} invalid {invalid}\") continue username =", "None: return 1 return memo_list.number+1 @staticmethod def get_inbox(user=None,page=1,pagesize=None): assert user!=None,\"User", "{path}\") path = os.path.join(path,f\"meta-{self.user_id}-{self.number}-{self.version}.json\") f = open(path,\"w\") json.dump(js,f) f.close() @property", "user who owns the memo _signers = db.Column(db.String(128),default='') # the", "# cannot be done until there is an id assigned", "return b10_to_rev(rev_to_b10(memo.version)+1) return b10_to_rev(1) # also known as 'A' def", "b10_to_rev(1) # also known as 'A' def save(self): db.session.add(self) db.session.commit()", "an existing memo new_memo = Memo(number = memo_number,\\ version =", "'A' def save(self): db.session.add(self) db.session.commit() self.saveJson() ################################################################################ # functions used", "False: return True # at this point we know it", "or len(parts) < 2: invalid.append(memo_ref) current_app.logger.info(f\"INVALID length append {memo_ref} valid={valid_memos}", "return False MemoSignature.unsign(self.id,signer,delegate) MemoHistory.activity(memo=self,user=delegate,memo_activity=MemoActivity.Unsign) self.process_state(acting=delegate) return True # Owner Function", "or memo.memo_state == MemoState.Obsolete): valid_memos.append(memo) valid_refs.append(memo_ref) else: invalid.append(memo_ref) rval =", "True # Owner Function def obsolete(self,delegate=None): current_app.logger.info(f\"Obsolete: {self} Delegate={delegate}\") if", "if self.memo_state != MemoState.Signoff: return False if not signer.is_delegate(delegate): return", "list of the files attached to this memo\"\"\" memo_list =", "signers from the signing table and turn it back to", "1- a valid draft is created # 2- a signature", "def can_reject(self, signer=None, delegate=None): \"\"\" can this memo be rejected", "create a new memo (i.e. not a new version of", "None: memo_list = Memo.query.filter(Memo.keywords.like(f\"%{keywords}%\")).order_by(Memo.action_date.desc()).paginate(page = page,per_page=pagesize) return memo_list @staticmethod def", "a signer you can reject.. 
even if you have already", "{self.user.username}-{self.number}-{self.version} has been published\") MemoHistory.activity(memo=self,memo_activity=MemoActivity.Activate,user=acting) self.obsolete_previous(acting=acting) else: current_app.logger.info(f\"Signatures Still Required\")", "= parts[2] memo = Memo.find(username=username,memo_number=memo_number,memo_version=memo_version) current_app.logger.info(f\"Memo = {memo}\") if memo", "= Memo.query.join(User).filter(Memo.memo_state == MemoState.Active)\\ .order_by(Memo.action_date.desc()).paginate(page = page,per_page=pagesize) return memo_list @staticmethod", "for memo in prev_list: if memo.memo_state == MemoState.Active: memo.memo_state =", "rejected for {signer.username} by {delegate.username}\") return True ################################################################################ # End", "# The key of the user who owns the memo", "def find(memo_id=None,username=None,memo_number=None,memo_version=None): if memo_id != None: return Memo.query.filter_by(id=memo_id).first() current_app.logger.debug(f\"FIND: Looking", "{memo.id} {memo.number} {memo.version}\") if memo: return b10_to_rev(rev_to_b10(memo.version)+1) return b10_to_rev(1) #", "users = User.valid_usernames(signer_names) for signer in users['valid_users']: MemoSignature.add_signer(memo=self,signer=signer) ###################################################################### #", "{memo}\") if memo != None and (memo.memo_state == MemoState.Active or", "current_app.logger.info(f\"Inbox for {user.username} = Items={len(memolist.items)} {memolist}\") return memolist @staticmethod def", "is None: return False if not self.user.is_delegate(delegate): return False if", "Create the JSON file which is a copy of all", "MemoState.Draft,\\ action_date = datetime.utcnow(),\\ create_date = datetime.utcnow(),\\ ) new_memo.save() new_memo.references", "user!=None,\"User must not be none\" if user == None: return", "== None: refstring=f\"{userid}-{ref[1]}\" else: refstring=f\"{userid}-{ref[1]}-{ref[2]}\" rval.append((refstring,memo)) return {'reflist':rval,'ref_string':self._references} @references.setter def", "Still Required\") self.action_date = datetime.utcnow() self.save() # TODO: ARH def", "Looking for {username}/{memo_number}/{memo_version}\") memoQry = Memo.query.filter_by(user_id=username,number=memo_number) if memo_version != None:", "it is not confidential than anyone can access if self.confidential", "db.Column(db.String) # A,B,..Z,AA,AB,...AZ,BA confidential = db.Column(db.Boolean, default=False) # if true", "create_date = datetime.utcnow(),\\ signers = '' ) new_memo.save() MemoHistory.activity(memo=new_memo,memo_activity=MemoActivity.Create,user=delegate) current_app.logger.info(f\"Creating", "gives the os path to a file \"\"\" path =", "function will return True of the \"username\" has access to", "= MemoFile.query.filter_by(memo_id=self.id).all() return memo_list def saveJson(self): \"\"\" Create the JSON", "\"\"\" js = {} js['title']=self.title js['number']=self.number js['version']=self.version js['confidential']=self.confidential js['distribution']=self.distribution js['keywords']=self.keywords", "False if self.memo_state != MemoState.Draft: return False if not self.user.is_delegate(delegate=delegate):", "into signoff\") else: self.memo_state = MemoState.Active self.active_date = datetime.utcnow() MemoHistory.activity(memo=self,memo_activity=MemoActivity.Activate,user=acting)", "cancled by the delegate. 
Only drafts memos can be canceled\"\"\"", "memo be obsoleted by the delegate? Only active memos can", "when the memo was created submit_date = db.Column(db.DateTime) # when", "MemoSignature.unsign(self.id,signer,delegate) MemoHistory.activity(memo=self,user=delegate,memo_activity=MemoActivity.Unsign) self.process_state(acting=delegate) return True # Owner Function def obsolete(self,delegate=None):", "a file \"\"\" path = os.path.join(current_app.root_path,\"static\",\"memos\",f\"{self.user_id}\",f\"{self.number}\",f\"{self.version}\") return path def get_relpath(self):", "if memo_number == None or memo==None: memo_number = Memo.get_next_number(owner) new_memo", "title of the memo num_files = db.Column(db.Integer, default=0) # The", "not a new version of an existing memo) if memo_number", "MemoHistory.activity(memo=self,memo_activity=MemoActivity.Activate,user=acting) self.obsolete_previous(acting=acting) self.notify_distribution(f\"memo {self.user.username}-{self.number}-{self.version} has been published\") if self.memo_state ==", "The model file for a Memo \"\"\" import re import", "js['keywords']=self.keywords js['userid']=self.user_id js['memo_state']=f\"{self.memo_state}\" js['keywords']= self.keywords js['signers']=self.signers['signers'] js['references']= self.references['ref_string'] js['files']=[] for", "= MemoSignature.get_signers(self) for sig in siglist: sig.signer = User.find(username=sig.signer_id) sig.delegate", "= [] invalid = [] for memo_ref in re.split(r'\\s|\\,|\\t|\\;|\\:',references): if", "return 1 return memo_list.number+1 @staticmethod def get_inbox(user=None,page=1,pagesize=None): assert user!=None,\"User must", ".order_by(Memo.action_date.desc()).paginate(page = page,per_page=pagesize) elif username: memo_list = Memo.query.join(User).filter(User.username==username,Memo.memo_state == MemoState.Active)\\", "return True if user.readAll: return True # if the username", "if you have already signed return status['is_signer'] def has_access(self, user=None):", "memo's if self.user.username == user.username: return True if user.admin: return", "@signers.setter def signers(self,signer_names): self._signers = signer_names MemoSignature.delete_signers(self) users = User.valid_usernames(signer_names)", "################################################################################ @staticmethod def find(memo_id=None,username=None,memo_number=None,memo_version=None): if memo_id != None: return Memo.query.filter_by(id=memo_id).first()", "refs: userid=ref[0] memo = Memo.find(username=userid,memo_number=ref[1],memo_version=ref[2]) if ref[2] == None: refstring=f\"{userid}-{ref[1]}\"", "\"\"\" Can this memo be obsoleted by the delegate? 
Only", "js['files'].append(file.filename) path = os.path.join(self.get_fullpath()) #current_app.logger.info(f\"Making Directory {path}\") os.makedirs(path,exist_ok=True) #current_app.logger.info(f\"Making Succeeded", ") new_memo.save() MemoHistory.activity(memo=new_memo,memo_activity=MemoActivity.Create,user=delegate) current_app.logger.info(f\"Creating new memo {new_memo}\") return new_memo if", "sign\") MemoSignature.sign(self.id,signer,delegate) MemoHistory.activity(memo=self,user=delegate,memo_activity=MemoActivity.Sign) self.process_state(acting=delegate) return True # signer function def", "if it is not confidential than anyone can access if", "state # these function would classiavally be called private ################################################################################", "@property def signers(self): # get the signers from the signing", "return False if not signer.is_delegate(delegate=delegate): return False # The list", "num_files = 0,\\ user_id = owner.username,\\ memo_state = MemoState.Draft,\\ action_date", "delegate=None): \"\"\"Can this memo be signed by delegate for the", "be cancled by the delegate. Only drafts memos can be", "can create a memo for the owner\"\"\" if owner is", "Memo.number==memo_number,\\ Memo.version==memo_version)\\ .paginate(page = page,per_page=pagesize) elif memo_number: memo_list = Memo.query.join(User).filter(User.username==username,Memo.number==memo_number)\\", "MemoState from memos.models.MemoFile import MemoFile from memos.models.MemoSignature import MemoSignature from", "keyword title = db.Column(db.String(128), nullable=False, default='') # The title of", "def get_memo_list(username=None,memo_number=None,memo_version=None,page=1,pagesize=None): if memo_version: memo_list = Memo.query.join(User).filter(User.username==username,\\ Memo.number==memo_number,\\ Memo.version==memo_version)\\ .paginate(page", "'',\\ keywords = '',\\ title = '',\\ num_files = 0,\\", "signers(self): # get the signers from the signing table and", "MemoSignature.is_signer(self.id,signer) return status['is_signer'] and status['status'] def can_obsolete(self, delegate=None): \"\"\" Can", "be done until there is an id assigned by the", "& the directory shutil.rmtree(self.get_fullpath()) MemoReference.delete(self) MemoSignature.delete_signers(self) MemoHistory.activity(memo=self,user=delegate,memo_activity=MemoActivity.Cancel) db.session.delete(self) db.session.commit() current_app.logger.info(f\"Canceling\")", "import current_app from memos import db from memos.models.User import User", "User.find(username=sig.delegate_id) return {'signers':self._signers,'siglist':siglist} @signers.setter def signers(self,signer_names): self._signers = signer_names MemoSignature.delete_signers(self)", "MemoReference.delete(self) MemoSignature.delete_signers(self) MemoHistory.activity(memo=self,user=delegate,memo_activity=MemoActivity.Cancel) db.session.delete(self) db.session.commit() current_app.logger.info(f\"Canceling\") return True # signer", "would classiavally be called private ################################################################################ def obsolete_previous(self,acting=None): prev_list =", "_references = db.Column(db.String(128),default='') # The hidden list of references memo_state", "re.split(r'-',reference) if len(parts) == 2: parts.append(None) return parts @staticmethod def", "parse_reference(reference): parts = re.split(r'-',reference) if len(parts) == 2: parts.append(None) return", "def obsolete_previous(self,acting=None): prev_list = 
Memo.query.join(User).filter(Memo.number == self.number,Memo.version != self.version).all() for", "= re.split(r'-',reference) if len(parts) == 2: parts.append(None) return parts @staticmethod", "to active state (from submitted) obsolete_date = db.Column(db.DateTime) # when", "Memo.version==memo_version)\\ .paginate(page = page,per_page=pagesize) elif memo_number: memo_list = Memo.query.join(User).filter(User.username==username,Memo.number==memo_number)\\ .order_by(Memo.action_date.desc()).paginate(page", "MemoReference.get_back_refs(self) ###################################################################### # ###################################################################### def get_next_version(self): memo = Memo.query.join(User).filter(Memo.number ==", "datetime.utcnow(),\\ create_date = datetime.utcnow(),\\ ) new_memo.save() new_memo.references = memo.references['ref_string'] #", "Owner Function def obsolete(self,delegate=None): current_app.logger.info(f\"Obsolete: {self} Delegate={delegate}\") if not self.can_obsolete(delegate=delegate):", "if the delegate can create a memo for the owner\"\"\"", "2: invalid.append(memo_ref) current_app.logger.info(f\"INVALID length append {memo_ref} valid={valid_memos} invalid {invalid}\") continue", "js['references']= self.references['ref_string'] js['files']=[] for file in self.get_files(): js['files'].append(file.filename) path =", "new version of an existing memo) if memo_number == None", "{message}\") ################################################################################ # State machine functions called by the viewcontroller", "attached to this memo\"\"\" memo_list = MemoFile.query.filter_by(memo_id=self.id).all() return memo_list def", "return memo_list @staticmethod def get_next_number(user=None): assert user!=None memo_list = Memo.query.join(User).filter(User.username==user.username)\\", "revise this memo \"\"\" assert owner != None and delegate", "number of files attached to the memo action_date = db.Column(db.DateTime,", "'',\\ title = '',\\ num_files = 0,\\ user_id = owner.username,\\", "MemoState.Active self.active_date = datetime.utcnow() self.notify_distribution(f\"memo {self.user.username}-{self.number}-{self.version} has been published\") MemoHistory.activity(memo=self,memo_activity=MemoActivity.Activate,user=acting)", "a string and a list siglist = MemoSignature.get_signers(self) for sig", "shutil import json from datetime import datetime from flask import", "= f\"{self}\" if not self.can_cancel(delegate=delegate): return False MemoFile.delete(self) # delete", "create_date = datetime.utcnow(),\\ ) new_memo.save() new_memo.references = memo.references['ref_string'] # cannot", "def can_revise(self, delegate=None): \"\"\"Is the delgate allowed to update \"this\"", "{self.user.username}-{self.number}-{self.version} has gone into signoff\") else: self.memo_state = MemoState.Active self.active_date", "the username is in the distribution list then provide access", "End of State machine functions ################################################################################ @staticmethod def find(memo_id=None,username=None,memo_number=None,memo_version=None): if", "distribution = memo.distribution,\\ keywords = memo.keywords,\\ title = memo.title,\\ num_files", "\"\"\" import re import os import shutil import json from", "not signer.is_delegate(delegate=delegate): return False # The list of signers and", "None if owner.is_delegate(delegate) != True: return None memo = 
Memo.query.join(User).filter(User.username==owner.username,Memo.number==memo_number).order_by(Memo.version.desc()).first()", "################################################################################ # State machine functions called by the viewcontroller ################################################################################", ") new_memo.save() new_memo.references = memo.references['ref_string'] # cannot be done until", "Delegate={delegate}\") memostring = f\"{self}\" if not self.can_cancel(delegate=delegate): return False MemoFile.delete(self)", "def create_revise(owner=None,delegate=None,memo_number=None): \"\"\" This function will return None or a", "if not signer.is_delegate(delegate=delegate): return False # The list of signers", "from memos.models.User import User from memos.models.MemoState import MemoState from memos.models.MemoFile", "memo_ref == '': continue parts = Memo.parse_reference(memo_ref) if len(parts) >", "if not self.can_unsign(signer,delegate): return False MemoSignature.unsign(self.id,signer,delegate) MemoHistory.activity(memo=self,user=delegate,memo_activity=MemoActivity.Unsign) self.process_state(acting=delegate) return True", "memoQry = Memo.query.filter_by(user_id=username,number=memo_number) if memo_version != None: memoQry.filter_by(version=memo_version) memo =", "Directory {path}\") os.makedirs(path,exist_ok=True) #current_app.logger.info(f\"Making Succeeded {path}\") path = os.path.join(path,f\"meta-{self.user_id}-{self.number}-{self.version}.json\") f", ": valid_memos,'invalid':invalid} return rval @property def references(self): # this function", "must provide a username if user is None: return False", "None or delegate is None: return False if self.memo_state !=", "a Memo \"\"\" import re import os import shutil import", "return False ######################################## # ??? 
Functions ######################################## def get_fullpath(self): \"\"\"", "Memo.parse_reference(refs['valid_refs'][i]) user = User.find(username=parsed_ref[0]) MemoReference.add_ref(self.id,ref_user_id=user.username,ref_memo_number=parsed_ref[1],ref_memo_version=parsed_ref[2]) @property def backrefs(self): return MemoReference.get_back_refs(self)", "custom initialization here def __repr__(self): return f\"{self.user.username}-{self.number}{self.version}\" def __str__(self): return", "be called private ################################################################################ def obsolete_previous(self,acting=None): prev_list = Memo.query.join(User).filter(Memo.number ==", "= page,per_page=pagesize) return memo_list @staticmethod def search(title=None,keywords=None,page=1,pagesize=None): current_app.logger.info(f\"Search title={title}\") if", "Memo.query.filter(Memo.keywords.like(f\"%{keywords}%\")).order_by(Memo.action_date.desc()).paginate(page = page,per_page=pagesize) return memo_list @staticmethod def get_next_number(user=None): assert user!=None", "of the files in that directory & the directory shutil.rmtree(self.get_fullpath())", "gone into signoff\") else: self.memo_state = MemoState.Active self.active_date = datetime.utcnow()", "or memo==None: memo_number = Memo.get_next_number(owner) new_memo = Memo(number = memo_number,\\", "self.number)\\ .order_by(Memo.version.desc()).first() current_app.logger.info(f\"get_next_version {memo.id} {memo.number} {memo.version}\") if memo: return b10_to_rev(rev_to_b10(memo.version)+1)", "been rejected for {signer.username} by {delegate.username}\") return True ################################################################################ #", "return parts @staticmethod def valid_references(references): current_app.logger.info(f'references ={references}') valid_memos = []", "usernames _references = db.Column(db.String(128),default='') # The hidden list of references", "Memo.find(username=userid,memo_number=ref[1],memo_version=ref[2]) if ref[2] == None: refstring=f\"{userid}-{ref[1]}\" else: refstring=f\"{userid}-{ref[1]}-{ref[2]}\" rval.append((refstring,memo)) return", "< 2: invalid.append(memo_ref) current_app.logger.info(f\"INVALID length append {memo_ref} valid={valid_memos} invalid {invalid}\")", "the references refs = MemoReference.get_refs(self) rval = [] for ref", "the delegate can create a memo for the owner\"\"\" if", "and if they have signed are kept in the MemoSignature", "self\"\"\" # if it is not confidential than anyone can", "signer in users['valid_users']: MemoSignature.add_signer(memo=self,signer=signer) ###################################################################### # References ###################################################################### @staticmethod def", "def valid_references(references): current_app.logger.info(f'references ={references}') valid_memos = [] valid_refs = []", "True # signer function def unsign(self,signer=None,delegate=None): if not self.can_unsign(signer,delegate): return", "@staticmethod def get_memo_list(username=None,memo_number=None,memo_version=None,page=1,pagesize=None): if memo_version: memo_list = Memo.query.join(User).filter(User.username==username,\\ Memo.number==memo_number,\\ Memo.version==memo_version)\\", "def saveJson(self): \"\"\" Create the JSON file which is a", "False if not signer.is_delegate(delegate): return False status = MemoSignature.is_signer(memo_id=self.id,signer=signer) #", "None: return None if owner.is_delegate(delegate) != True: return None 
memo", "References ###################################################################### @staticmethod def parse_reference(reference): parts = re.split(r'-',reference) if len(parts)", "= datetime.utcnow() MemoHistory.activity(memo=self,memo_activity=MemoActivity.Activate,user=acting) self.obsolete_previous(acting=acting) self.notify_distribution(f\"memo {self.user.username}-{self.number}-{self.version} has been published\") if", "a list siglist = MemoSignature.get_signers(self) for sig in siglist: sig.signer", "# user names on the distribution keywords = db.Column(db.String(128), default='')", "def get_files(self): \"\"\" Return a list of the files attached", "if MemoSignature.status(self.id): self.memo_state = MemoState.Active self.active_date = datetime.utcnow() self.notify_distribution(f\"memo {self.user.username}-{self.number}-{self.version}", "\"memo\" and all of the \"memos\" \"\"\" id = db.Column(db.Integer,", "current_app.logger.info(f'references ={references}') valid_memos = [] valid_refs = [] invalid =", "###################################################################### # References ###################################################################### @staticmethod def parse_reference(reference): parts = re.split(r'-',reference)", "valid={valid_memos} invalid {invalid}\") continue username = parts[0] memo_number = parts[1]", "!= None: memo_list = Memo.query.filter(Memo.title.like(f\"%{title}%\")).order_by(Memo.action_date.desc()).paginate(page = page,per_page=pagesize) if keywords !=", "has been rejected for {signer.username} by {delegate.username}\") return True ################################################################################", "the memo num_files = db.Column(db.Integer, default=0) # The number of", "memo_version: memo_list = Memo.query.join(User).filter(User.username==username,\\ Memo.number==memo_number,\\ Memo.version==memo_version)\\ .paginate(page = page,per_page=pagesize) elif", "= Memo.query.join(User).filter(User.username==user.username)\\ .order_by(Memo.number.desc()).first() if memo_list == None: return 1 return", "MemoHistory.activity(memo=self,memo_activity=MemoActivity.Activate,user=acting) self.obsolete_previous(acting=acting) else: current_app.logger.info(f\"Signatures Still Required\") self.action_date = datetime.utcnow() self.save()", "userid=ref[0] memo = Memo.find(username=userid,memo_number=ref[1],memo_version=ref[2]) if ref[2] == None: refstring=f\"{userid}-{ref[1]}\" else:", "False if not signer.is_delegate(delegate=delegate): return False status = MemoSignature.is_signer(self.id,signer) return", "memos can be canceled\"\"\" if delegate is None: return False", "'A',\\ confidential = False,\\ distribution = '',\\ keywords = '',\\", "can_cancel(self, delegate=None): \"\"\" can this memo be cancled by the", "is None or delegate is None: return False if self.memo_state", "return memo @staticmethod def get_memo_list(username=None,memo_number=None,memo_version=None,page=1,pagesize=None): if memo_version: memo_list = Memo.query.join(User).filter(User.username==username,\\", "get_drafts(user=None,page=1,pagesize=None): assert user!=None,\"User must not be none\" if user ==", "datetime.utcnow() self.submit_date = None self.active_date = None self.obsolete_date = None", "Signoff, Active, Obsolete def __init__(self, **kwargs): super().__init__(**kwargs) # do custom", "your own memo's if self.user.username == user.username: return True if", "access TODO: ARH do something better if user.username in 
re.split('\\s|\\,|\\t|\\;|\\:',self.distribution):", "action_date = datetime.utcnow(),\\ create_date = datetime.utcnow(),\\ ) new_memo.save() new_memo.references =", "keywords != None: memo_list = Memo.query.filter(Memo.keywords.like(f\"%{keywords}%\")).order_by(Memo.action_date.desc()).paginate(page = page,per_page=pagesize) return memo_list", "memostring = f\"{self}\" if not self.can_cancel(delegate=delegate): return False MemoFile.delete(self) #", "== MemoState.Draft: current_app.logger.info(f\"Found a draft memo {memo}\") return memo #", "time anything happened create_date = db.Column(db.DateTime) # when the memo", "signers\"\"\" if signer is None or delegate is None: return", "called private ################################################################################ def obsolete_previous(self,acting=None): prev_list = Memo.query.join(User).filter(Memo.number == self.number,Memo.version", "the state # these function would classiavally be called private", "= db.Column(db.Integer, primary_key=True) number = db.Column(db.Integer) # Memo Number version", "== MemoState.Active)\\ .order_by(Memo.action_date.desc()).paginate(page = page,per_page=pagesize) else: memo_list = Memo.query.join(User).filter(Memo.memo_state ==", "True # Owner Function def cancel(self,delegate=None): current_app.logger.info(f\"Cancel: {self} Delegate={delegate}\") memostring", "a new Memo if the owner/delgate and revise this memo", "b10_to_rev, rev_to_b10 class Memo(db.Model): \"\"\"This class is the single interface", "# ??? Functions ######################################## def get_fullpath(self): \"\"\" This function gives", "keywords = db.Column(db.String(128), default='') # any keyword title = db.Column(db.String(128),", "ARH def notify_distribution(self,message): current_app.logger.info(F\"Notify Distribution {self.distribution} {message}\") # TODO: ARH", "memo != None and (memo.memo_state == MemoState.Active or memo.memo_state ==", "valid_memos,'invalid':invalid} return rval @property def references(self): # this function will", "return True return False ######################################## # ??? 
Functions ######################################## def", "Memo if the owner/delgate and revise this memo \"\"\" assert", "and not status['status'] def can_unsign(self, signer=None, delegate=None): \"\"\"Can this memo", "db.Column(db.String(128), default='') # any keyword title = db.Column(db.String(128), nullable=False, default='')", "!= MemoState.Signoff: return False if not signer.is_delegate(delegate=delegate): return False status", "self.can_sign(signer,delegate): current_app.logger.info(\"NOT!!@ allowed to sign\") return False current_app.logger.info(\"allowed to sign\")", "None and delegate != None if owner == None or", "== None or delegate == None: return None if owner.is_delegate(delegate)", "self.save() return True # Owner Function def cancel(self,delegate=None): current_app.logger.info(f\"Cancel: {self}", "def notify_signers(self,message): current_app.logger.info(F\"Notify signers {message}\") ################################################################################ # State machine functions", "# these function would classiavally be called private ################################################################################ def", "= os.path.join(path,f\"meta-{self.user_id}-{self.number}-{self.version}.json\") f = open(path,\"w\") json.dump(js,f) f.close() @property def signers(self):", "and all of the \"memos\" \"\"\" id = db.Column(db.Integer, primary_key=True)", "path def get_files(self): \"\"\" Return a list of the files", "# 2- a signature happens # 3- an unsign happens", "and a list siglist = MemoSignature.get_signers(self) for sig in siglist:", "\"\"\" The model file for a Memo \"\"\" import re", ".order_by(Memo.version.desc()).first() current_app.logger.info(f\"get_next_version {memo.id} {memo.number} {memo.version}\") if memo: return b10_to_rev(rev_to_b10(memo.version)+1) return", "0,\\ user_id = memo.user_id,\\ memo_state = MemoState.Draft,\\ action_date = datetime.utcnow(),\\", "signer function def unsign(self,signer=None,delegate=None): if not self.can_unsign(signer,delegate): return False MemoSignature.unsign(self.id,signer,delegate)", "table and turn it back to a string and a", "back to a string and a list siglist = MemoSignature.get_signers(self)", "files in that directory & the directory shutil.rmtree(self.get_fullpath()) MemoReference.delete(self) MemoSignature.delete_signers(self)", "else: invalid.append(memo_ref) rval = {'valid_refs':valid_refs, 'valid_memos' : valid_memos,'invalid':invalid} return rval", "if MemoSignature.status(self.id) == False: self.memo_state = MemoState.Signoff self.submit_date = datetime.utcnow()", "memo_state = MemoState.Draft,\\ action_date = datetime.utcnow(),\\ create_date = datetime.utcnow(),\\ signers", "memo.user_id,\\ memo_state = MemoState.Draft,\\ action_date = datetime.utcnow(),\\ create_date = datetime.utcnow(),\\", "self.memo_state = MemoState.Signoff self.submit_date = datetime.utcnow() MemoHistory.activity(memo=self,memo_activity=MemoActivity.Signoff,user=acting) self.notify_signers(f\"memo {self.user.username}-{self.number}-{self.version} has", "False self.memo_state = MemoState.Draft self.action_date = datetime.utcnow() self.submit_date = None", "if you are a signer you can reject.. 
even if", "= datetime.utcnow(),\\ create_date = datetime.utcnow(),\\ ) new_memo.save() new_memo.references = memo.references['ref_string']", "be signed by delegate for the signers\"\"\" if signer is", "= owner.username,\\ memo_state = MemoState.Draft,\\ action_date = datetime.utcnow(),\\ create_date =", "= db.Column(db.String(128),default='') # the hidden list of signer usernames _references", "delegate is None: return False if not self.user.is_delegate(delegate): return False", "True return False def can_cancel(self, delegate=None): \"\"\" can this memo", "to process the state # these function would classiavally be", "return False if self.memo_state != MemoState.Draft: return False if not", "return None or a new Memo if the owner/delgate and", "False def can_cancel(self, delegate=None): \"\"\" can this memo be cancled", "\"\"\"This class is the single interface to a \"memo\" and", "a \"memo\" and all of the \"memos\" \"\"\" id =", "list siglist = MemoSignature.get_signers(self) for sig in siglist: sig.signer =", "if memo_version != None: memoQry.filter_by(version=memo_version) memo = memoQry.first() current_app.logger.debug(f\"Found Memo", "__repr__(self): return f\"{self.user.username}-{self.number}{self.version}\" def __str__(self): return f\"{self.user.username}-{self.number}{self.version}\" ######################################## # Permission", "signer=None, delegate=None): \"\"\"Can this memo be signed by delegate for", "distribution list then provide access TODO: ARH do something better", "self._references = references refs = Memo.valid_references(references) for i in range(len(refs['valid_refs'])):", "be unsigned by delegate for the signer \"\"\" if signer", "valid_memos = [] valid_refs = [] invalid = [] for", "to your own memo's if self.user.username == user.username: return True", "references(self): # this function will return a list of refeference", "# A,B,..Z,AA,AB,...AZ,BA confidential = db.Column(db.Boolean, default=False) # if true only", "False MemoSignature.unsign(self.id,signer,delegate) MemoHistory.activity(memo=self,user=delegate,memo_activity=MemoActivity.Unsign) self.process_state(acting=delegate) return True # Owner Function def", "find(memo_id=None,username=None,memo_number=None,memo_version=None): if memo_id != None: return Memo.query.filter_by(id=memo_id).first() current_app.logger.debug(f\"FIND: Looking for", "from memos import db from memos.models.User import User from memos.models.MemoState", "return False if delegate is None: delegate = owner return", "sig.delegate = User.find(username=sig.delegate_id) return {'signers':self._signers,'siglist':siglist} @signers.setter def signers(self,signer_names): self._signers =", "at this point we know it is confidential so ...", "return False self.memo_state = MemoState.Obsolete self.action_date = datetime.utcnow() self.obsolete_date =", "self.process_state(acting=delegate) return True # signer function def unsign(self,signer=None,delegate=None): if not", "os path to a file \"\"\" path = os.path.join(current_app.root_path,\"static\",\"memos\",f\"{self.user_id}\",f\"{self.number}\",f\"{self.version}\") return", "from memos.models.MemoSignature import MemoSignature from memos.models.MemoReference import MemoReference from memos.models.MemoHistory", "the \"memos\" \"\"\" id = db.Column(db.Integer, primary_key=True) number = db.Column(db.Integer)", "MemoHistory.activity(memo=self,user=delegate,memo_activity=MemoActivity.Unsign) self.process_state(acting=delegate) return True # Owner Function def obsolete(self,delegate=None): 
"""This is the model file for a Memo"""

import re
import os
import shutil
import json
from datetime import datetime

from flask import current_app

from memos import db
from memos.models.User import User
from memos.models.MemoState import MemoState
from memos.models.MemoFile import MemoFile
from memos.models.MemoSignature import MemoSignature
from memos.models.MemoReference import MemoReference
from memos.models.MemoHistory import MemoHistory
from memos.models.MemoActivity import MemoActivity
from memos.revletter import b10_to_rev, rev_to_b10


class Memo(db.Model):
    """This class is the single interface to a "memo" and all of the "memos"."""

    id = db.Column(db.Integer, primary_key=True)
    number = db.Column(db.Integer)                          # memo number
    version = db.Column(db.String)                          # A, B, .. Z, AA, AB, ... AZ, BA
    confidential = db.Column(db.Boolean, default=False)     # if true, only author, signers and distribution can read
    distribution = db.Column(db.String(128), default='')    # usernames on the distribution
    keywords = db.Column(db.String(128), default='')        # any keywords
    title = db.Column(db.String(128), nullable=False, default='')   # the title of the memo
    num_files = db.Column(db.Integer, default=0)            # the number of files attached to the memo
    action_date = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)  # the last time anything happened
    create_date = db.Column(db.DateTime)                    # when the memo was created
    submit_date = db.Column(db.DateTime)                    # when the memo was most recently submitted (from created)
    active_date = db.Column(db.DateTime)                    # when the memo was moved to the active state (from submitted)
    obsolete_date = db.Column(db.DateTime)                  # when the memo was moved to the obsolete state (from active)
    user_id = db.Column(db.String(120), db.ForeignKey('user.username'), nullable=False)  # the user who owns the memo
    _signers = db.Column(db.String(128), default='')        # the hidden list of signer usernames
    _references = db.Column(db.String(128), default='')     # the hidden list of references
    memo_state = db.Column(db.Enum(MemoState))              # Draft, Signoff, Active, Obsolete
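    # Note (added for clarity): the underscored columns _signers and _references
    # store the raw separator-delimited strings as entered by the user; the
    # signers and references properties further down keep those strings in sync
    # with the MemoSignature and MemoReference tables.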
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # do custom initialization here

    def __repr__(self):
        return f"{self.user.username}-{self.number}{self.version}"

    def __str__(self):
        return f"{self.user.username}-{self.number}{self.version}"

    ########################################
    # Permission Functions
    ########################################

    @staticmethod
    def can_create(owner=None, delegate=None):
        """Will return true if the delegate can create a memo for the owner."""
        if owner is None:
            return False
        if delegate is None:
            delegate = owner
        return owner.is_delegate(delegate=delegate)

    def can_revise(self, delegate=None):
        """Is the delegate allowed to update "this" memo?"""
        if delegate is None:
            return False
        if not self.user.is_delegate(delegate):
            return False
        if self.memo_state == MemoState.Active or self.memo_state == MemoState.Obsolete:
            return True
        return False

    def can_sign(self, signer=None, delegate=None):
        """Can this memo be signed by the delegate for the signer?"""
        if signer is None or delegate is None:
            return False
        if self.memo_state != MemoState.Signoff:
            return False
        if not signer.is_delegate(delegate=delegate):
            return False
        # The list of signers, and whether each has signed, is kept in the
        # MemoSignature table.
        status = MemoSignature.is_signer(self.id, signer)
        return status['is_signer'] and not status['status']

    def can_unsign(self, signer=None, delegate=None):
        """Can this memo be unsigned by the delegate for the signer?"""
        if signer is None or delegate is None:
            return False
        if self.memo_state != MemoState.Signoff:
            return False
        if not signer.is_delegate(delegate=delegate):
            return False
        status = MemoSignature.is_signer(self.id, signer)
        return status['is_signer'] and status['status']
    def can_obsolete(self, delegate=None):
        """Can this memo be obsoleted by the delegate?  Only active memos can be obsoleted."""
        if delegate is None:
            return False
        if not self.user.is_delegate(delegate):
            return False
        if self.memo_state == MemoState.Active:
            return True
        return False

    def can_cancel(self, delegate=None):
        """Can this memo be canceled by the delegate?  Only draft memos can be canceled."""
        if delegate is None:
            return False
        if self.memo_state != MemoState.Draft:
            return False
        if not self.user.is_delegate(delegate=delegate):
            return False
        return True

    def can_reject(self, signer=None, delegate=None):
        """Can this memo be rejected by the delegate?  Only memos in signoff can be rejected."""
        if signer is None or delegate is None:
            return False
        if self.memo_state != MemoState.Signoff:
            return False
        if not signer.is_delegate(delegate):
            return False
        status = MemoSignature.is_signer(memo_id=self.id, signer=signer)
        # If you are a signer you can reject, even if you have already signed.
        return status['is_signer']

    def has_access(self, user=None):
        """Return True if the user has access to this memo."""
        # If it is not confidential then anyone can access it.
        if self.confidential == False:
            return True
        # At this point we know it is confidential, so a user must be provided.
        if user is None:
            return False
        # You always have access to your own memos.
        if self.user.username == user.username:
            return True
        if user.admin:
            return True
        if user.readAll:
            return True
        # If the username is in the distribution list then provide access.
        # TODO: ARH do something better
        if user.username in re.split(r'\s|,|\t|;|:', self.distribution):
            return True
        return False
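    # --- Illustrative sketch (added for clarity; not part of the original module) ---
    # The permission helpers above are pure predicates: the view layer is expected
    # to call the matching can_* check before invoking a state-changing method,
    # for example:
    #
    #     if memo.can_sign(signer=current_user, delegate=current_user):
    #         memo.sign(signer=current_user, delegate=current_user)
    #
    # The signer/delegate split lets one user act on behalf of another; when a
    # user acts for themselves, the same User object is passed for both arguments.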
"is None: return False # you alway have access to", "in refs: userid=ref[0] memo = Memo.find(username=userid,memo_number=ref[1],memo_version=ref[2]) if ref[2] == None:", "def references(self,references): self._references = references refs = Memo.valid_references(references) for i", "the memo was moved to active state (from submitted) obsolete_date", "return True # signer function def unsign(self,signer=None,delegate=None): if not self.can_unsign(signer,delegate):", "for a Memo \"\"\" import re import os import shutil", "this memo be cancled by the delegate. Only drafts memos", "memo==None: memo_number = Memo.get_next_number(owner) new_memo = Memo(number = memo_number,\\ version", "Memo.parse_reference(memo_ref) if len(parts) > 3 or len(parts) < 2: invalid.append(memo_ref)", "will return True of the \"username\" has access to self\"\"\"", "= None self.active_date = None self.obsolete_date = None MemoHistory.activity(memo=self,memo_activity=MemoActivity.Reject,user=delegate) MemoSignature.unsign_all(self)", "of the \"memos\" \"\"\" id = db.Column(db.Integer, primary_key=True) number =", "not signer.is_delegate(delegate): return False status = MemoSignature.is_signer(memo_id=self.id,signer=signer) # if you", "viewcontroller ################################################################################ # Owner Function @staticmethod def create_revise(owner=None,delegate=None,memo_number=None): \"\"\" This", "len(parts) > 3 or len(parts) < 2: invalid.append(memo_ref) current_app.logger.info(f\"INVALID length", "interface to a \"memo\" and all of the \"memos\" \"\"\"", "in self.get_files(): js['files'].append(file.filename) path = os.path.join(self.get_fullpath()) #current_app.logger.info(f\"Making Directory {path}\") os.makedirs(path,exist_ok=True)", "return False if not self.user.is_delegate(delegate=delegate): return False return True def", "string of the references refs = MemoReference.get_refs(self) rval = []", "an existing memo) if memo_number == None or memo==None: memo_number", "MemoSignature.add_signer(memo=self,signer=signer) ###################################################################### # References ###################################################################### @staticmethod def parse_reference(reference): parts =", "= datetime.utcnow() self.obsolete_date = datetime.utcnow() MemoHistory.activity(memo=self,user=delegate,memo_activity=MemoActivity.Obsolete) self.save() return True #", "MemoState.Draft self.action_date = datetime.utcnow() self.submit_date = None self.active_date = None", "= {} js['title']=self.title js['number']=self.number js['version']=self.version js['confidential']=self.confidential js['distribution']=self.distribution js['keywords']=self.keywords js['userid']=self.user_id js['memo_state']=f\"{self.memo_state}\"", "= open(path,\"w\") json.dump(js,f) f.close() @property def signers(self): # get the", ".order_by(Memo.action_date.desc()).paginate(page = page,per_page=pagesize) return memo_list @staticmethod def search(title=None,keywords=None,page=1,pagesize=None): current_app.logger.info(f\"Search title={title}\")", "memo be cancled by the delegate. 
Only drafts memos can", "def get_inbox(user=None,page=1,pagesize=None): assert user!=None,\"User must not be none\" if user", "signed return status['is_signer'] def has_access(self, user=None): \"\"\"This function will return", "obsolete(self,delegate=None): current_app.logger.info(f\"Obsolete: {self} Delegate={delegate}\") if not self.can_obsolete(delegate=delegate): return False self.memo_state", "else: memo_list = Memo.query.join(User).filter(Memo.memo_state == MemoState.Active)\\ .order_by(Memo.action_date.desc()).paginate(page = page,per_page=pagesize) return", "memo_number,\\ version = memo.get_next_version(),\\ confidential = memo.confidential,\\ distribution = memo.distribution,\\", "#current_app.logger.info(f\"Making Succeeded {path}\") path = os.path.join(path,f\"meta-{self.user_id}-{self.number}-{self.version}.json\") f = open(path,\"w\") json.dump(js,f)", "nullable=False, default='') # The title of the memo num_files =", "@staticmethod def get_next_number(user=None): assert user!=None memo_list = Memo.query.join(User).filter(User.username==user.username)\\ .order_by(Memo.number.desc()).first() if", "False,\\ distribution = '',\\ keywords = '',\\ title = '',\\", "def signers(self,signer_names): self._signers = signer_names MemoSignature.delete_signers(self) users = User.valid_usernames(signer_names) for", "###################################################################### # ###################################################################### def get_next_version(self): memo = Memo.query.join(User).filter(Memo.number == self.number)\\", "machine functions ################################################################################ @staticmethod def find(memo_id=None,username=None,memo_number=None,memo_version=None): if memo_id != None:", "None or a new Memo if the owner/delgate and revise", "return memo_list @staticmethod def search(title=None,keywords=None,page=1,pagesize=None): current_app.logger.info(f\"Search title={title}\") if title !=", "db.Column(db.DateTime) # when the memo was moved to active state", "= Memo.query.filter(Memo.keywords.like(f\"%{keywords}%\")).order_by(Memo.action_date.desc()).paginate(page = page,per_page=pagesize) return memo_list @staticmethod def get_next_number(user=None): assert", "= datetime.utcnow() MemoHistory.activity(memo=self,memo_activity=MemoActivity.Signoff,user=acting) self.notify_signers(f\"memo {self.user.username}-{self.number}-{self.version} has gone into signoff\") else:", "functions called by the viewcontroller ################################################################################ # Owner Function @staticmethod", "[] for ref in refs: userid=ref[0] memo = Memo.find(username=userid,memo_number=ref[1],memo_version=ref[2]) if", "confidential = False,\\ distribution = '',\\ keywords = '',\\ title", "user == None: return None msigs = MemoSignature.get_signatures(user,signed=False) memolist =", "(i.e. 
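    # --- Illustrative sketch (added for clarity; not part of the original module) ---
    # For a hypothetical memo alice-42C, the call above would write
    # static/memos/alice/42/C/meta-alice-42-C.json with roughly this shape
    # (field values are made up; only the key set comes from saveJson):
    #
    #     {"title": "...", "number": 42, "version": "C", "confidential": false,
    #      "distribution": "bob carol", "keywords": "...", "userid": "alice",
    #      "memo_state": "MemoState.Active", "signers": "bob",
    #      "references": "alice-41", "files": ["spec.pdf"]}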
    @property
    def signers(self):
        # Get the signers from the signing table and turn them back into a
        # string and a list.
        siglist = MemoSignature.get_signers(self)
        for sig in siglist:
            sig.signer = User.find(username=sig.signer_id)
            sig.delegate = User.find(username=sig.delegate_id)
        return {'signers': self._signers, 'siglist': siglist}

    @signers.setter
    def signers(self, signer_names):
        self._signers = signer_names
        MemoSignature.delete_signers(self)
        users = User.valid_usernames(signer_names)
        for signer in users['valid_users']:
            MemoSignature.add_signer(memo=self, signer=signer)

    ######################################################################
    # References
    ######################################################################

    @staticmethod
    def parse_reference(reference):
        parts = re.split(r'-', reference)
        if len(parts) == 2:
            parts.append(None)
        return parts

    @staticmethod
    def valid_references(references):
        current_app.logger.info(f'references = {references}')
        valid_memos = []
        valid_refs = []
        invalid = []
        for memo_ref in re.split(r'\s|,|\t|;|:', references):
            if memo_ref == '':
                continue
            parts = Memo.parse_reference(memo_ref)
            if len(parts) > 3 or len(parts) < 2:
                invalid.append(memo_ref)
                current_app.logger.info(f"INVALID length, append {memo_ref} valid={valid_memos} invalid={invalid}")
                continue
            username = parts[0]
            memo_number = parts[1]
            memo_version = parts[2]
            memo = Memo.find(username=username, memo_number=memo_number, memo_version=memo_version)
            current_app.logger.info(f"Memo = {memo}")
            if memo != None and (memo.memo_state == MemoState.Active or
                                 memo.memo_state == MemoState.Obsolete):
                valid_memos.append(memo)
                valid_refs.append(memo_ref)
            else:
                invalid.append(memo_ref)
        rval = {'valid_refs': valid_refs, 'valid_memos': valid_memos, 'invalid': invalid}
        return rval
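    # --- Illustrative sketch (added for clarity; not part of the original module) ---
    # A reference string has the form "<username>-<number>[-<version>]", so:
    #
    #     Memo.parse_reference("alice-42-B")  ->  ["alice", "42", "B"]
    #     Memo.parse_reference("alice-42")    ->  ["alice", "42", None]   # version filled in as None
    #
    # valid_references() then splits a whitespace/comma/tab/semicolon/colon
    # separated list of such strings and sorts each entry into valid or invalid,
    # where "valid" means the referenced memo exists and is Active or Obsolete.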
    @property
    def references(self):
        # This function returns a list of reference objects plus a string of
        # the references.
        refs = MemoReference.get_refs(self)
        rval = []
        for ref in refs:
            userid = ref[0]
            memo = Memo.find(username=userid, memo_number=ref[1], memo_version=ref[2])
            if ref[2] == None:
                refstring = f"{userid}-{ref[1]}"
            else:
                refstring = f"{userid}-{ref[1]}-{ref[2]}"
            rval.append((refstring, memo))
        return {'reflist': rval, 'ref_string': self._references}

    @references.setter
    def references(self, references):
        self._references = references
        refs = Memo.valid_references(references)
        for i in range(len(refs['valid_refs'])):
            parsed_ref = Memo.parse_reference(refs['valid_refs'][i])
            user = User.find(username=parsed_ref[0])
            MemoReference.add_ref(self.id, ref_user_id=user.username,
                                  ref_memo_number=parsed_ref[1],
                                  ref_memo_version=parsed_ref[2])

    @property
    def backrefs(self):
        return MemoReference.get_back_refs(self)

    ######################################################################
    #
    ######################################################################

    def get_next_version(self):
        memo = Memo.query.join(User).filter(Memo.number == self.number)\
            .order_by(Memo.version.desc()).first()
        if memo:  # guard before logging so a missing memo cannot raise
            current_app.logger.info(f"get_next_version {memo.id} {memo.number} {memo.version}")
            return b10_to_rev(rev_to_b10(memo.version) + 1)
        return b10_to_rev(1)  # also known as 'A'

    def save(self):
        db.session.add(self)
        db.session.commit()
        self.saveJson()
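    # --- Illustrative sketch (added for clarity; not part of the original module) ---
    # memos.revletter is not shown in this file.  From its use above,
    # rev_to_b10/b10_to_rev plausibly convert between version letters and
    # bijective base-26 integers ('A' -> 1, 'Z' -> 26, 'AA' -> 27, ...).
    # A minimal sketch under that assumption:
    #
    #     def rev_to_b10(rev):
    #         n = 0
    #         for ch in rev:
    #             n = n * 26 + (ord(ch) - ord('A') + 1)
    #         return n
    #
    #     def b10_to_rev(n):
    #         rev = ''
    #         while n > 0:
    #             n, rem = divmod(n - 1, 26)
    #             rev = chr(ord('A') + rem) + rev
    #         return rev
    #
    # so get_next_version() turns 'Z' into 'AA' and 'AZ' into 'BA'.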
    ################################################################################
    # Functions used to process the state.
    # These functions would classically be called private.
    ################################################################################

    def obsolete_previous(self, acting=None):
        prev_list = Memo.query.join(User).filter(Memo.number == self.number,
                                                 Memo.version != self.version).all()
        for memo in prev_list:
            if memo.memo_state == MemoState.Active:
                memo.memo_state = MemoState.Obsolete
                MemoHistory.activity(memo=memo, memo_activity=MemoActivity.Obsolete, user=acting)
                memo.save()

    # This function is called when:
    # 1- a valid draft is created
    # 2- a signature happens
    # 3- an unsign happens
    def process_state(self, acting=None):
        if self.memo_state == MemoState.Draft:
            if MemoSignature.status(self.id) == False:
                self.memo_state = MemoState.Signoff
                self.submit_date = datetime.utcnow()
                MemoHistory.activity(memo=self, memo_activity=MemoActivity.Signoff, user=acting)
                self.notify_signers(f"memo {self.user.username}-{self.number}-{self.version} has gone into signoff")
            else:
                self.memo_state = MemoState.Active
                self.active_date = datetime.utcnow()
                MemoHistory.activity(memo=self, memo_activity=MemoActivity.Activate, user=acting)
                self.obsolete_previous(acting=acting)
                self.notify_distribution(f"memo {self.user.username}-{self.number}-{self.version} has been published")

        if self.memo_state == MemoState.Signoff:
            if MemoSignature.status(self.id):
                self.memo_state = MemoState.Active
                self.active_date = datetime.utcnow()
                self.notify_distribution(f"memo {self.user.username}-{self.number}-{self.version} has been published")
                MemoHistory.activity(memo=self, memo_activity=MemoActivity.Activate, user=acting)
                self.obsolete_previous(acting=acting)
            else:
                self.notify_signers(f"memo {self.user.username}-{self.number}-{self.version} Signatures Required")

        self.action_date = datetime.utcnow()
        self.save()

    # TODO: ARH
    def notify_distribution(self, message):
        current_app.logger.info(f"Notify Distribution {self.distribution} {message}")

    # TODO: ARH
    def notify_signers(self, message):
        current_app.logger.info(f"Notify signers {message}")
    ################################################################################
    # State machine functions called by the view controller
    ################################################################################

    # Owner Function
    @staticmethod
    def create_revise(owner=None, delegate=None, memo_number=None):
        """Return None, or a new Memo if the owner/delegate may create or revise this memo."""
        assert owner != None and delegate != None
        if owner == None or delegate == None:
            return None
        if owner.is_delegate(delegate) != True:
            return None

        memo = Memo.query.join(User).filter(User.username == owner.username,
                                            Memo.number == memo_number)\
            .order_by(Memo.version.desc()).first()

        # Create a new memo (i.e. not a new version of an existing memo).
        if memo_number == None or memo == None:
            memo_number = Memo.get_next_number(owner)
            new_memo = Memo(number=memo_number,
                            version='A',
                            confidential=False,
                            distribution='',
                            keywords='',
                            title='',
                            num_files=0,
                            user_id=owner.username,
                            memo_state=MemoState.Draft,
                            action_date=datetime.utcnow(),
                            create_date=datetime.utcnow(),
                            signers='')
            new_memo.save()
            MemoHistory.activity(memo=new_memo, memo_activity=MemoActivity.Create, user=delegate)
            current_app.logger.info(f"Creating new memo {new_memo}")
            return new_memo

        if memo.memo_state == MemoState.Draft:
            current_app.logger.info(f"Found a draft memo {memo}")
            return memo

        # Revise an existing memo.
        new_memo = Memo(number=memo_number,
                        version=memo.get_next_version(),
                        confidential=memo.confidential,
                        distribution=memo.distribution,
                        keywords=memo.keywords,
                        title=memo.title,
                        num_files=0,
                        user_id=memo.user_id,
                        memo_state=MemoState.Draft,
                        action_date=datetime.utcnow(),
                        create_date=datetime.utcnow())
        new_memo.save()
        new_memo.references = memo.references['ref_string']  # cannot be done until the save assigns an id
        new_memo.signers = memo._signers                     # cannot be done until the save assigns an id
        new_memo.save()
        MemoHistory.activity(memo=new_memo, memo_activity=MemoActivity.Create, user=delegate)
        return new_memo

    # signer function
    def sign(self, signer=None, delegate=None):
        current_app.logger.info(f"signer = {signer} delegate={delegate}")
        if not self.can_sign(signer, delegate):
            current_app.logger.info("NOT allowed to sign")
            return False
        current_app.logger.info("allowed to sign")
        MemoSignature.sign(self.id, signer, delegate)
        MemoHistory.activity(memo=self, user=delegate, memo_activity=MemoActivity.Sign)
        self.process_state(acting=delegate)
        return True

    # signer function
    def unsign(self, signer=None, delegate=None):
        if not self.can_unsign(signer, delegate):
            return False
        MemoSignature.unsign(self.id, signer, delegate)
        MemoHistory.activity(memo=self, user=delegate, memo_activity=MemoActivity.Unsign)
        self.process_state(acting=delegate)
        return True
    # Owner Function
    def obsolete(self, delegate=None):
        current_app.logger.info(f"Obsolete: {self} Delegate={delegate}")
        if not self.can_obsolete(delegate=delegate):
            return False
        self.memo_state = MemoState.Obsolete
        self.action_date = datetime.utcnow()
        self.obsolete_date = datetime.utcnow()
        MemoHistory.activity(memo=self, user=delegate, memo_activity=MemoActivity.Obsolete)
        self.save()
        return True

    # Owner Function
    def cancel(self, delegate=None):
        current_app.logger.info(f"Cancel: {self} Delegate={delegate}")
        memostring = f"{self}"
        if not self.can_cancel(delegate=delegate):
            return False
        MemoFile.delete(self)
        # Delete all of the files in that directory and the directory itself.
        shutil.rmtree(self.get_fullpath())
        MemoReference.delete(self)
        MemoSignature.delete_signers(self)
        MemoHistory.activity(memo=self, user=delegate, memo_activity=MemoActivity.Cancel)
        db.session.delete(self)
        db.session.commit()
        current_app.logger.info(f"Canceled {memostring}")
        return True

    # signer function
    def reject(self, signer=None, delegate=None):
        current_app.logger.info(f"signer = {signer} delegate={delegate}")
        if not self.can_reject(signer, delegate):
            return False
        self.memo_state = MemoState.Draft
        self.action_date = datetime.utcnow()
        self.submit_date = None
        self.active_date = None
        self.obsolete_date = None
        MemoHistory.activity(memo=self, memo_activity=MemoActivity.Reject, user=delegate)
        MemoSignature.unsign_all(self)
        self.save()
        self.notify_signers(f"Memo {self.user.username}-{self.number}-{self.version} has been rejected for {signer.username} by {delegate.username}")
        return True

    ################################################################################
    # End of State machine functions
    ################################################################################

    @staticmethod
    def find(memo_id=None, username=None, memo_number=None, memo_version=None):
        if memo_id != None:
            return Memo.query.filter_by(id=memo_id).first()

        current_app.logger.debug(f"FIND: Looking for {username}/{memo_number}/{memo_version}")
        memoQry = Memo.query.filter_by(user_id=username, number=memo_number)
        if memo_version != None:
            memoQry = memoQry.filter_by(version=memo_version)  # reassign: filter_by returns a new query
        memo = memoQry.first()
        current_app.logger.debug(f"Found Memo id={memo}")
        return memo
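    # --- Illustrative sketch (added for clarity; not part of the original module) ---
    # find() resolves either a primary key or a (username, number[, version])
    # triple; 'alice' and the numbers below are hypothetical values:
    #
    #     Memo.find(memo_id=7)                                          # by primary key
    #     Memo.find(username='alice', memo_number=42, memo_version='B') # one exact version
    #     Memo.find(username='alice', memo_number=42)                   # no version filter;
    #                                                                   # first() picks an
    #                                                                   # unordered match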
they must provide a username if user is", "None or memo==None: memo_number = Memo.get_next_number(owner) new_memo = Memo(number =", "the signer \"\"\" if signer is None or delegate is", "if they have signed are kept in the MemoSignature table", "@staticmethod def parse_reference(reference): parts = re.split(r'-',reference) if len(parts) == 2:", "def notify_distribution(self,message): current_app.logger.info(F\"Notify Distribution {self.distribution} {message}\") # TODO: ARH def", "if title != None: memo_list = Memo.query.filter(Memo.title.like(f\"%{title}%\")).order_by(Memo.action_date.desc()).paginate(page = page,per_page=pagesize) if", "to obsolete state (from active) user_id = db.Column(db.String(120), db.ForeignKey('user.username'),nullable=False) #", "def reject(self,signer=None,delegate=None): current_app.logger.info(f\"signer = {signer} delegate={delegate}\") if not self.can_reject(signer,delegate): return", "parts.append(None) return parts @staticmethod def valid_references(references): current_app.logger.info(f'references ={references}') valid_memos =", "Functions ######################################## @staticmethod def can_create(owner=None, delegate=None): \"\"\"Will return true if", "delegate is None: return False if self.memo_state != MemoState.Signoff: return", "if not signer.is_delegate(delegate): return False status = MemoSignature.is_signer(memo_id=self.id,signer=signer) # if", "open(path,\"w\") json.dump(js,f) f.close() @property def signers(self): # get the signers", "= [] valid_refs = [] invalid = [] for memo_ref", "these function would classiavally be called private ################################################################################ def obsolete_previous(self,acting=None):", "refs = Memo.valid_references(references) for i in range(len(refs['valid_refs'])): parsed_ref = Memo.parse_reference(refs['valid_refs'][i])", "string and a list siglist = MemoSignature.get_signers(self) for sig in", "1 return memo_list.number+1 @staticmethod def get_inbox(user=None,page=1,pagesize=None): assert user!=None,\"User must not", "refstring=f\"{userid}-{ref[1]}-{ref[2]}\" rval.append((refstring,memo)) return {'reflist':rval,'ref_string':self._references} @references.setter def references(self,references): self._references = references", "attached to the memo action_date = db.Column(db.DateTime, nullable=False, default=datetime.utcnow) #", "can be obsoleted \"\"\" if delegate is None: return False", "(memo.memo_state == MemoState.Active or memo.memo_state == MemoState.Obsolete): valid_memos.append(memo) valid_refs.append(memo_ref) else:", "self.submit_date = datetime.utcnow() MemoHistory.activity(memo=self,memo_activity=MemoActivity.Signoff,user=acting) self.notify_signers(f\"memo {self.user.username}-{self.number}-{self.version} has gone into signoff\")", "return False status = MemoSignature.is_signer(self.id,signer) return status['is_signer'] and status['status'] def", "@staticmethod def get_inbox(user=None,page=1,pagesize=None): assert user!=None,\"User must not be none\" if", "draft memo {memo}\") return memo # revise an existing memo", "{memo.version}\") if memo: return b10_to_rev(rev_to_b10(memo.version)+1) return b10_to_rev(1) # also known", "signer_names MemoSignature.delete_signers(self) users = User.valid_usernames(signer_names) for signer in users['valid_users']: MemoSignature.add_signer(memo=self,signer=signer)", "the delegate. 
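    # Usage sketch for the permission helpers above (alice/bob are hypothetical
    # User objects; nothing in this comment is executed by the module):
    #
    #   >>> Memo.can_create(owner=alice, delegate=alice)  # owners are their own delegate
    #   True
    #   >>> memo.can_sign(signer=bob, delegate=bob)    # True only in Signoff, before bob has signed
    #   >>> memo.can_unsign(signer=bob, delegate=bob)  # True only in Signoff, after bob has signed
    #   >>> memo.can_reject(signer=bob, delegate=bob)  # True in Signoff even after bob has signed
    #   >>> memo.has_access(user=None)                 # True whenever the memo is not confidential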
    ########################################
    # ??? Functions
    ########################################

    def get_fullpath(self):
        """This function gives the os path to a file"""
        path = os.path.join(current_app.root_path, "static", "memos",
                            f"{self.user_id}", f"{self.number}", f"{self.version}")
        return path

    def get_relpath(self):
        """Return the relative path of this memo"""
        path = os.path.join("/static", "memos",
                            f"{self.user_id}", f"{self.number}", f"{self.version}")
        return path

    def get_files(self):
        """Return a list of the files attached to this memo"""
        memo_list = MemoFile.query.filter_by(memo_id=self.id).all()
        return memo_list

    def saveJson(self):
        """Create the JSON file which is a copy of all of the metadata"""
        js = {}
        js['title'] = self.title
        js['number'] = self.number
        js['version'] = self.version
        js['confidential'] = self.confidential
        js['distribution'] = self.distribution
        js['keywords'] = self.keywords
        js['userid'] = self.user_id
        js['memo_state'] = f"{self.memo_state}"
        js['signers'] = self.signers['signers']
        js['references'] = self.references['ref_string']
        js['files'] = []
        for file in self.get_files():
            js['files'].append(file.filename)

        path = os.path.join(self.get_fullpath())
        #current_app.logger.info(f"Making Directory {path}")
        os.makedirs(path, exist_ok=True)
        #current_app.logger.info(f"Making Succeeded {path}")

        path = os.path.join(path, f"meta-{self.user_id}-{self.number}-{self.version}.json")
        with open(path, "w") as f:  # context manager replaces the original open/close pair
            json.dump(js, f)

    @property
    def signers(self):
        # get the signers from the signing table and turn them back into a string and a list
        siglist = MemoSignature.get_signers(self)
        for sig in siglist:
            sig.signer = User.find(username=sig.signer_id)
            sig.delegate = User.find(username=sig.delegate_id)
        return {'signers': self._signers, 'siglist': siglist}

    @signers.setter
    def signers(self, signer_names):
        self._signers = signer_names
        MemoSignature.delete_signers(self)
        users = User.valid_usernames(signer_names)
        for signer in users['valid_users']:
            MemoSignature.add_signer(memo=self, signer=signer)
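    # Resulting on-disk layout for one memo, as implied by get_fullpath() and
    # saveJson() above (the concrete root depends on current_app.root_path):
    #
    #   <root>/static/memos/<user_id>/<number>/<version>/
    #       meta-<user_id>-<number>-<version>.json   <- metadata written by saveJson()
    #       ...attached files, tracked by MemoFile rows...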
    ######################################################################
    # References
    ######################################################################

    @staticmethod
    def parse_reference(reference):
        parts = re.split(r'-', reference)
        if len(parts) == 2:
            parts.append(None)
        return parts

    @staticmethod
    def valid_references(references):
        current_app.logger.info(f'references ={references}')
        valid_memos = []
        valid_refs = []
        invalid = []
        for memo_ref in re.split(r'\s|\,|\t|\;|\:', references):
            if memo_ref == '':
                continue
            parts = Memo.parse_reference(memo_ref)
            if len(parts) > 3 or len(parts) < 2:
                invalid.append(memo_ref)
                current_app.logger.info(f"INVALID length append {memo_ref} valid={valid_memos} invalid {invalid}")
                continue
            username = parts[0]
            memo_number = parts[1]
            memo_version = parts[2]
            memo = Memo.find(username=username, memo_number=memo_number, memo_version=memo_version)
            current_app.logger.info(f"Memo = {memo}")
            if memo != None and (memo.memo_state == MemoState.Active or memo.memo_state == MemoState.Obsolete):
                valid_memos.append(memo)
                valid_refs.append(memo_ref)
            else:
                invalid.append(memo_ref)
        rval = {'valid_refs': valid_refs, 'valid_memos': valid_memos, 'invalid': invalid}
        return rval

    @property
    def references(self):
        # this function will return a list of reference objects plus a string of the references
        refs = MemoReference.get_refs(self)
        rval = []
        for ref in refs:
            userid = ref[0]
            memo = Memo.find(username=userid, memo_number=ref[1], memo_version=ref[2])
            if ref[2] == None:
                refstring = f"{userid}-{ref[1]}"
            else:
                refstring = f"{userid}-{ref[1]}-{ref[2]}"
            rval.append((refstring, memo))
        return {'reflist': rval, 'ref_string': self._references}

    @references.setter
    def references(self, references):
        self._references = references
        refs = Memo.valid_references(references)
        for i in range(len(refs['valid_refs'])):
            parsed_ref = Memo.parse_reference(refs['valid_refs'][i])
            user = User.find(username=parsed_ref[0])
            MemoReference.add_ref(self.id, ref_user_id=user.username,
                                  ref_memo_number=parsed_ref[1], ref_memo_version=parsed_ref[2])

    @property
    def backrefs(self):
        return MemoReference.get_back_refs(self)
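    # Reference strings are whitespace/punctuation separated
    # "<user>-<number>[-<version>]" tokens.  For example (hypothetical memos):
    #
    #   >>> Memo.parse_reference('alice-7')
    #   ['alice', '7', None]          # version omitted -> None appended
    #   >>> Memo.parse_reference('alice-7-B')
    #   ['alice', '7', 'B']
    #
    # valid_references() keeps only the tokens that resolve to an Active or
    # Obsolete memo; everything else lands in the 'invalid' list.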
    ######################################################################
    # Version / persistence helpers
    ######################################################################

    def get_next_version(self):
        memo = Memo.query.join(User).filter(Memo.number == self.number)\
            .order_by(Memo.version.desc()).first()
        if memo:
            # log only when a previous version exists; memo can be None here
            current_app.logger.info(f"get_next_version {memo.id} {memo.number} {memo.version}")
            return b10_to_rev(rev_to_b10(memo.version) + 1)
        return b10_to_rev(1)  # also known as 'A'

    def save(self):
        db.session.add(self)
        db.session.commit()
        self.saveJson()

    ################################################################################
    # Functions used to process the state.
    # These functions would classically be called private.
    ################################################################################

    def obsolete_previous(self, acting=None):
        prev_list = Memo.query.join(User).filter(Memo.number == self.number, Memo.version != self.version).all()
        for memo in prev_list:
            if memo.memo_state == MemoState.Active:
                memo.memo_state = MemoState.Obsolete
                MemoHistory.activity(memo=memo, memo_activity=MemoActivity.Obsolete, user=acting)
                memo.save()

    # This function is called when:
    # 1 - a valid draft is created
    # 2 - a signature happens
    # 3 - an unsign happens
    def process_state(self, acting=None):
        if self.memo_state == MemoState.Draft:
            if MemoSignature.status(self.id) == False:
                self.memo_state = MemoState.Signoff
                self.submit_date = datetime.utcnow()
                MemoHistory.activity(memo=self, memo_activity=MemoActivity.Signoff, user=acting)
                self.notify_signers(f"memo {self.user.username}-{self.number}-{self.version} has gone into signoff")
            else:
                self.memo_state = MemoState.Active
                self.active_date = datetime.utcnow()
                MemoHistory.activity(memo=self, memo_activity=MemoActivity.Activate, user=acting)
                self.obsolete_previous(acting=acting)
                self.notify_distribution(f"memo {self.user.username}-{self.number}-{self.version} has been published")

        if self.memo_state == MemoState.Signoff:
            if MemoSignature.status(self.id):
                self.memo_state = MemoState.Active
                self.active_date = datetime.utcnow()
                self.notify_distribution(f"memo {self.user.username}-{self.number}-{self.version} has been published")
                MemoHistory.activity(memo=self, memo_activity=MemoActivity.Activate, user=acting)
                self.obsolete_previous(acting=acting)
            else:
                current_app.logger.info("Signatures Still Required")

        self.action_date = datetime.utcnow()
        self.save()

    # TODO: ARH
    def notify_distribution(self, message):
        current_app.logger.info(f"Notify Distribution {self.distribution} {message}")

    # TODO: ARH
    def notify_signers(self, message):
        current_app.logger.info(f"Notify signers {message}")
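    # The resulting lifecycle, as implemented by process_state() and the state
    # machine functions below:
    #
    #   Draft --(submit, signatures pending)--> Signoff --(all signed)--> Active --(obsolete)--> Obsolete
    #     ^                                        |
    #     +----------------(reject)---------------+
    #
    # A draft with no outstanding signers skips Signoff and activates
    # immediately; activating a new version obsoletes every previous Active
    # version with the same number.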
"Memo.query.filter_by(id=memo_id).first() current_app.logger.debug(f\"FIND: Looking for {username}/{memo_number}/{memo_version}\") memoQry = Memo.query.filter_by(user_id=username,number=memo_number) if memo_version", "memo_list = Memo.query.join(User).filter(User.username==username,Memo.memo_state == MemoState.Active)\\ .order_by(Memo.action_date.desc()).paginate(page = page,per_page=pagesize) else: memo_list", "None: return False if delegate is None: delegate = owner", "page,per_page=pagesize) return memo_list @staticmethod def get_next_number(user=None): assert user!=None memo_list =", "username is in the distribution list then provide access TODO:", "'valid_memos' : valid_memos,'invalid':invalid} return rval @property def references(self): # this", "notify_distribution(self,message): current_app.logger.info(F\"Notify Distribution {self.distribution} {message}\") # TODO: ARH def notify_signers(self,message):", "id assigned by the save new_memo.signers = memo._signers # cannot", "= db.Column(db.DateTime, nullable=False, default=datetime.utcnow) # The last time anything happened", "memo_list = Memo.query.join(User).filter(User.username==user.username)\\ .order_by(Memo.number.desc()).first() if memo_list == None: return 1", "= '',\\ title = '',\\ num_files = 0,\\ user_id =", "unsign(self,signer=None,delegate=None): if not self.can_unsign(signer,delegate): return False MemoSignature.unsign(self.id,signer,delegate) MemoHistory.activity(memo=self,user=delegate,memo_activity=MemoActivity.Unsign) self.process_state(acting=delegate) return", "# State machine functions called by the viewcontroller ################################################################################ #", "= User.valid_usernames(signer_names) for signer in users['valid_users']: MemoSignature.add_signer(memo=self,signer=signer) ###################################################################### # References", "= MemoState.Obsolete self.action_date = datetime.utcnow() self.obsolete_date = datetime.utcnow() MemoHistory.activity(memo=self,user=delegate,memo_activity=MemoActivity.Obsolete) self.save()", "= '',\\ num_files = 0,\\ user_id = owner.username,\\ memo_state =", "This function gives the os path to a file \"\"\"", "self.obsolete_previous(acting=acting) else: current_app.logger.info(f\"Signatures Still Required\") self.action_date = datetime.utcnow() self.save() #", "for i in range(len(refs['valid_refs'])): parsed_ref = Memo.parse_reference(refs['valid_refs'][i]) user = User.find(username=parsed_ref[0])", "# when the memo was most recently submitted (from created)", "from memos.models.MemoState import MemoState from memos.models.MemoFile import MemoFile from memos.models.MemoSignature", "this memo \"\"\" assert owner != None and delegate !=", "State machine functions called by the viewcontroller ################################################################################ # Owner", "in siglist: sig.signer = User.find(username=sig.signer_id) sig.delegate = User.find(username=sig.delegate_id) return {'signers':self._signers,'siglist':siglist}", "@property def backrefs(self): return MemoReference.get_back_refs(self) ###################################################################### # ###################################################################### def get_next_version(self):", "page,per_page=pagesize) elif username: memo_list = Memo.query.join(User).filter(User.username==username,Memo.memo_state == MemoState.Active)\\ .order_by(Memo.action_date.desc()).paginate(page =", "= Items={len(memolist.items)} 
{memolist}\") return memolist @staticmethod def get_drafts(user=None,page=1,pagesize=None): assert user!=None,\"User", "path = os.path.join(path,f\"meta-{self.user_id}-{self.number}-{self.version}.json\") f = open(path,\"w\") json.dump(js,f) f.close() @property def", "db.Column(db.String(128), default='') # user names on the distribution keywords =", "return memolist @staticmethod def get_drafts(user=None,page=1,pagesize=None): assert user!=None,\"User must not be", "+ a string of the references refs = MemoReference.get_refs(self) rval", "you have already signed return status['is_signer'] def has_access(self, user=None): \"\"\"This", "delegate=None): \"\"\"Can this memo be unsigned by delegate for the", "MemoHistory.activity(memo=self,user=delegate,memo_activity=MemoActivity.Cancel) db.session.delete(self) db.session.commit() current_app.logger.info(f\"Canceling\") return True # signer function def", "key of the user who owns the memo _signers =", "if memo: return b10_to_rev(rev_to_b10(memo.version)+1) return b10_to_rev(1) # also known as", "if owner == None or delegate == None: return None", "@staticmethod def can_create(owner=None, delegate=None): \"\"\"Will return true if the delegate", "False if self.memo_state != MemoState.Signoff: return False if not signer.is_delegate(delegate):", "and delegate != None if owner == None or delegate", "only author, signer, distribution can read distribution = db.Column(db.String(128), default='')", "obsolete_date = db.Column(db.DateTime) # when the memo was moved to", "= memo.references['ref_string'] # cannot be done until there is an", "def can_create(owner=None, delegate=None): \"\"\"Will return true if the delegate can", "# Owner Function def obsolete(self,delegate=None): current_app.logger.info(f\"Obsolete: {self} Delegate={delegate}\") if not", "if not self.can_sign(signer,delegate): current_app.logger.info(\"NOT!!@ allowed to sign\") return False current_app.logger.info(\"allowed", "memo_number == None or memo==None: memo_number = Memo.get_next_number(owner) new_memo =", "import re import os import shutil import json from datetime", "ref in refs: userid=ref[0] memo = Memo.find(username=userid,memo_number=ref[1],memo_version=ref[2]) if ref[2] ==", "User from memos.models.MemoState import MemoState from memos.models.MemoFile import MemoFile from", "delete all of the files in that directory & the", "Memo.query.join(User).filter(User.username==username,Memo.memo_state == MemoState.Active)\\ .order_by(Memo.action_date.desc()).paginate(page = page,per_page=pagesize) else: memo_list = Memo.query.join(User).filter(Memo.memo_state", "list of refeference objects + a string of the references", "MemoReference.add_ref(self.id,ref_user_id=user.username,ref_memo_number=parsed_ref[1],ref_memo_version=parsed_ref[2]) @property def backrefs(self): return MemoReference.get_back_refs(self) ###################################################################### # ###################################################################### def", "memo_list == None: return 1 return memo_list.number+1 @staticmethod def get_inbox(user=None,page=1,pagesize=None):", "else: self.memo_state = MemoState.Active self.active_date = datetime.utcnow() MemoHistory.activity(memo=self,memo_activity=MemoActivity.Activate,user=acting) self.obsolete_previous(acting=acting) self.notify_distribution(f\"memo", "the files attached to this memo\"\"\" memo_list = MemoFile.query.filter_by(memo_id=self.id).all() return", "import MemoActivity from memos.revletter import b10_to_rev, rev_to_b10 class 
Memo(db.Model): \"\"\"This", "self.memo_state == MemoState.Active: return True return False def can_cancel(self, delegate=None):", "# TODO: ARH def notify_signers(self,message): current_app.logger.info(F\"Notify signers {message}\") ################################################################################ #", "Obsolete def __init__(self, **kwargs): super().__init__(**kwargs) # do custom initialization here", "user.username in re.split('\\s|\\,|\\t|\\;|\\:',self.distribution): return True return False ######################################## # ???", "={references}') valid_memos = [] valid_refs = [] invalid = []", "return False # The list of signers and if they", "current_app.logger.info(f\"Search title={title}\") if title != None: memo_list = Memo.query.filter(Memo.title.like(f\"%{title}%\")).order_by(Memo.action_date.desc()).paginate(page =", "The list of signers and if they have signed are", "The title of the memo num_files = db.Column(db.Integer, default=0) #", "if not self.can_reject(signer,delegate): return False self.memo_state = MemoState.Draft self.action_date =", "= MemoSignature.is_signer(self.id,signer) return status['is_signer'] and not status['status'] def can_unsign(self, signer=None,", ".order_by(Memo.action_date.desc()).paginate(page = page,per_page=pagesize) else: memo_list = Memo.query.join(User).filter(Memo.memo_state == MemoState.Active)\\ .order_by(Memo.action_date.desc()).paginate(page", "self.memo_state == MemoState.Obsolete: return True def can_sign(self, signer=None, delegate=None): \"\"\"Can", "current_app.logger.info(\"NOT!!@ allowed to sign\") return False current_app.logger.info(\"allowed to sign\") MemoSignature.sign(self.id,signer,delegate)", "function will return a list of refeference objects + a", "Delegate={delegate}\") if not self.can_obsolete(delegate=delegate): return False self.memo_state = MemoState.Obsolete self.action_date", "@staticmethod def search(title=None,keywords=None,page=1,pagesize=None): current_app.logger.info(f\"Search title={title}\") if title != None: memo_list", "!= None: return Memo.query.filter_by(id=memo_id).first() current_app.logger.debug(f\"FIND: Looking for {username}/{memo_number}/{memo_version}\") memoQry =", "file for a Memo \"\"\" import re import os import", "return True of the \"username\" has access to self\"\"\" #", "of the memo num_files = db.Column(db.Integer, default=0) # The number", "functions ################################################################################ @staticmethod def find(memo_id=None,username=None,memo_number=None,memo_version=None): if memo_id != None: return", "MemoState.Draft: current_app.logger.info(f\"Found a draft memo {memo}\") return memo # revise", "parts @staticmethod def valid_references(references): current_app.logger.info(f'references ={references}') valid_memos = [] valid_refs", "this point we know it is confidential so ... 
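    # Usage sketch for create_revise() (alice is a hypothetical User object):
    #
    #   Memo.create_revise(owner=alice, delegate=alice)                 # brand new memo, version 'A'
    #   Memo.create_revise(owner=alice, delegate=alice, memo_number=7)  # existing draft of 7, or next revision of 7
    #
    # Revising copies title/distribution/keywords/references/signers from the
    # latest version into a fresh Draft carrying the next version letter.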
    # Signer function
    def sign(self, signer=None, delegate=None):
        current_app.logger.info(f"signer = {signer} delegate={delegate}")
        if not self.can_sign(signer, delegate):
            current_app.logger.info("NOT allowed to sign")
            return False
        current_app.logger.info("allowed to sign")
        MemoSignature.sign(self.id, signer, delegate)
        MemoHistory.activity(memo=self, user=delegate, memo_activity=MemoActivity.Sign)
        self.process_state(acting=delegate)
        return True

    # Signer function
    def unsign(self, signer=None, delegate=None):
        if not self.can_unsign(signer, delegate):
            return False
        MemoSignature.unsign(self.id, signer, delegate)
        MemoHistory.activity(memo=self, user=delegate, memo_activity=MemoActivity.Unsign)
        self.process_state(acting=delegate)
        return True

    # Owner function
    def obsolete(self, delegate=None):
        current_app.logger.info(f"Obsolete: {self} Delegate={delegate}")
        if not self.can_obsolete(delegate=delegate):
            return False
        self.memo_state = MemoState.Obsolete
        self.action_date = datetime.utcnow()
        self.obsolete_date = datetime.utcnow()
        MemoHistory.activity(memo=self, user=delegate, memo_activity=MemoActivity.Obsolete)
        self.save()
        return True

    # Owner function
    def cancel(self, delegate=None):
        current_app.logger.info(f"Cancel: {self} Delegate={delegate}")
        if not self.can_cancel(delegate=delegate):
            return False
        MemoFile.delete(self)
        # delete all of the files in that directory and the directory itself
        shutil.rmtree(self.get_fullpath())
        MemoReference.delete(self)
        MemoSignature.delete_signers(self)
        MemoHistory.activity(memo=self, user=delegate, memo_activity=MemoActivity.Cancel)
        db.session.delete(self)
        db.session.commit()
        current_app.logger.info("Canceling")
        return True

    # Signer function
    def reject(self, signer=None, delegate=None):
        current_app.logger.info(f"signer = {signer} delegate={delegate}")
        if not self.can_reject(signer, delegate):
            return False
        self.memo_state = MemoState.Draft
        self.action_date = datetime.utcnow()
        self.submit_date = None
        self.active_date = None
        self.obsolete_date = None
        MemoHistory.activity(memo=self, memo_activity=MemoActivity.Reject, user=delegate)
        MemoSignature.unsign_all(self)
        self.save()
        self.notify_signers(f"Memo {self.user.username}-{self.number}-{self.version} has been rejected for {signer.username} by {delegate.username}")
        return True
    ################################################################################
    # Query functions
    ################################################################################

    @staticmethod
    def find(memo_id=None, username=None, memo_number=None, memo_version=None):
        if memo_id != None:
            return Memo.query.filter_by(id=memo_id).first()
        current_app.logger.debug(f"FIND: Looking for {username}/{memo_number}/{memo_version}")
        memoQry = Memo.query.filter_by(user_id=username, number=memo_number)
        if memo_version != None:
            # filter_by returns a new query; it must be reassigned or the version filter is lost
            memoQry = memoQry.filter_by(version=memo_version)
        memo = memoQry.first()
        current_app.logger.debug(f"Found Memo id={memo}")
        return memo

    @staticmethod
    def get_memo_list(username=None, memo_number=None, memo_version=None, page=1, pagesize=None):
        if memo_version:
            memo_list = Memo.query.join(User).filter(User.username == username,
                                                     Memo.number == memo_number,
                                                     Memo.version == memo_version)\
                .paginate(page=page, per_page=pagesize)
        elif memo_number:
            memo_list = Memo.query.join(User).filter(User.username == username,
                                                     Memo.number == memo_number)\
                .order_by(Memo.action_date.desc()).paginate(page=page, per_page=pagesize)
        elif username:
            memo_list = Memo.query.join(User).filter(User.username == username,
                                                     Memo.memo_state == MemoState.Active)\
                .order_by(Memo.action_date.desc()).paginate(page=page, per_page=pagesize)
        else:
            memo_list = Memo.query.join(User).filter(Memo.memo_state == MemoState.Active)\
                .order_by(Memo.action_date.desc()).paginate(page=page, per_page=pagesize)
        return memo_list

    @staticmethod
    def search(title=None, keywords=None, page=1, pagesize=None):
        current_app.logger.info(f"Search title={title}")
        memo_list = None  # guard: otherwise memo_list is unbound when both arguments are None
        if title != None:
            memo_list = Memo.query.filter(Memo.title.like(f"%{title}%"))\
                .order_by(Memo.action_date.desc()).paginate(page=page, per_page=pagesize)
        if keywords != None:
            memo_list = Memo.query.filter(Memo.keywords.like(f"%{keywords}%"))\
                .order_by(Memo.action_date.desc()).paginate(page=page, per_page=pagesize)
        return memo_list
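    # The query helpers above and below return Flask-SQLAlchemy Pagination
    # objects, so a view can do, for example (hypothetical endpoint/values):
    #
    #   memos = Memo.get_memo_list(username='alice', page=1, pagesize=10)
    #   for memo in memos.items:
    #       ...
    #   next_url = url_for('main.memos', page=memos.next_num) if memos.has_next else None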
they must", "return True def can_reject(self, signer=None, delegate=None): \"\"\" can this memo", "return False self.memo_state = MemoState.Draft self.action_date = datetime.utcnow() self.submit_date =", "MemoState.Active)\\ .order_by(Memo.action_date.desc()).paginate(page = page,per_page=pagesize) else: memo_list = Memo.query.join(User).filter(Memo.memo_state == MemoState.Active)\\", "!= None and (memo.memo_state == MemoState.Active or memo.memo_state == MemoState.Obsolete):", "False status = MemoSignature.is_signer(memo_id=self.id,signer=signer) # if you are a signer", "= Memo.parse_reference(memo_ref) if len(parts) > 3 or len(parts) < 2:", "== None: return None memolist = Memo.query.join(User).filter(Memo.memo_state==MemoState.Draft,User.username==user.username).order_by(Memo.action_date.desc()).paginate(page = page,per_page=pagesize) return", "will return None or a new Memo if the owner/delgate", "self.can_unsign(signer,delegate): return False MemoSignature.unsign(self.id,signer,delegate) MemoHistory.activity(memo=self,user=delegate,memo_activity=MemoActivity.Unsign) self.process_state(acting=delegate) return True # Owner", "new Memo if the owner/delgate and revise this memo \"\"\"", "a list of the files attached to this memo\"\"\" memo_list", "memolist = Memo.query.join(User).filter(Memo.memo_state==MemoState.Signoff,Memo.id.in_(msigs)).order_by(Memo.action_date.desc()).paginate(page = page,per_page=pagesize) current_app.logger.info(f\"Inbox for {user.username} = Items={len(memolist.items)}", "created) active_date = db.Column(db.DateTime) # when the memo was moved", "if memo_ref == '': continue parts = Memo.parse_reference(memo_ref) if len(parts)", "def backrefs(self): return MemoReference.get_back_refs(self) ###################################################################### # ###################################################################### def get_next_version(self): memo", "published\") if self.memo_state == MemoState.Signoff: if MemoSignature.status(self.id): self.memo_state = MemoState.Active", "signer.is_delegate(delegate): return False status = MemoSignature.is_signer(memo_id=self.id,signer=signer) # if you are", "data \"\"\" js = {} js['title']=self.title js['number']=self.number js['version']=self.version js['confidential']=self.confidential js['distribution']=self.distribution", "save new_memo.signers = memo._signers # cannot be done until there", "self.memo_state != MemoState.Signoff: return False if not signer.is_delegate(delegate): return False", "obsoleted by the delegate? 
Only active memos can be obsoleted", "Function def obsolete(self,delegate=None): current_app.logger.info(f\"Obsolete: {self} Delegate={delegate}\") if not self.can_obsolete(delegate=delegate): return", "delegate=None): \"\"\" can this memo be rejected by the delegate.", "else: refstring=f\"{userid}-{ref[1]}-{ref[2]}\" rval.append((refstring,memo)) return {'reflist':rval,'ref_string':self._references} @references.setter def references(self,references): self._references =", "state (from active) user_id = db.Column(db.String(120), db.ForeignKey('user.username'),nullable=False) # The key", "distribution can read distribution = db.Column(db.String(128), default='') # user names", "= signer_names MemoSignature.delete_signers(self) users = User.valid_usernames(signer_names) for signer in users['valid_users']:", "MemoReference.get_refs(self) rval = [] for ref in refs: userid=ref[0] memo", "confidential = db.Column(db.Boolean, default=False) # if true only author, signer,", "parts = re.split(r'-',reference) if len(parts) == 2: parts.append(None) return parts", "new_memo = Memo(number = memo_number,\\ version = 'A',\\ confidential =", "== user.username: return True if user.admin: return True if user.readAll:", "new memo (i.e. not a new version of an existing", "can_revise(self, delegate=None): \"\"\"Is the delgate allowed to update \"this\" memo?\"\"\"", "when the memo was moved to obsolete state (from active)", "= page,per_page=pagesize) elif username: memo_list = Memo.query.join(User).filter(User.username==username,Memo.memo_state == MemoState.Active)\\ .order_by(Memo.action_date.desc()).paginate(page", "= page,per_page=pagesize) current_app.logger.info(f\"Inbox for {user.username} = Items={len(memolist.items)} {memolist}\") return memolist", "Memo.query.filter(Memo.title.like(f\"%{title}%\")).order_by(Memo.action_date.desc()).paginate(page = page,per_page=pagesize) if keywords != None: memo_list = Memo.query.filter(Memo.keywords.like(f\"%{keywords}%\")).order_by(Memo.action_date.desc()).paginate(page", "if signer is None or delegate is None: return False", "get the signers from the signing table and turn it", "the single interface to a \"memo\" and all of the", "you are a signer you can reject.. 
even if you", "{memo.number} {memo.version}\") if memo: return b10_to_rev(rev_to_b10(memo.version)+1) return b10_to_rev(1) # also", "author, signer, distribution can read distribution = db.Column(db.String(128), default='') #", "= User.find(username=parsed_ref[0]) MemoReference.add_ref(self.id,ref_user_id=user.username,ref_memo_number=parsed_ref[1],ref_memo_version=parsed_ref[2]) @property def backrefs(self): return MemoReference.get_back_refs(self) ###################################################################### #", "memo in prev_list: if memo.memo_state == MemoState.Active: memo.memo_state = MemoState.Obsolete", "process_state(self,acting=None): if self.memo_state == MemoState.Draft: if MemoSignature.status(self.id) == False: self.memo_state", "obsolete state (from active) user_id = db.Column(db.String(120), db.ForeignKey('user.username'),nullable=False) # The", "# if the username is in the distribution list then", "create_revise(owner=None,delegate=None,memo_number=None): \"\"\" This function will return None or a new", "Memo.get_next_number(owner) new_memo = Memo(number = memo_number,\\ version = 'A',\\ confidential", "js['title']=self.title js['number']=self.number js['version']=self.version js['confidential']=self.confidential js['distribution']=self.distribution js['keywords']=self.keywords js['userid']=self.user_id js['memo_state']=f\"{self.memo_state}\" js['keywords']= self.keywords", "for file in self.get_files(): js['files'].append(file.filename) path = os.path.join(self.get_fullpath()) #current_app.logger.info(f\"Making Directory", "= memo.user_id,\\ memo_state = MemoState.Draft,\\ action_date = datetime.utcnow(),\\ create_date =", "datetime.utcnow() MemoHistory.activity(memo=self,user=delegate,memo_activity=MemoActivity.Obsolete) self.save() return True # Owner Function def cancel(self,delegate=None):", "= memo.title,\\ num_files = 0,\\ user_id = memo.user_id,\\ memo_state =", "None: return Memo.query.filter_by(id=memo_id).first() current_app.logger.debug(f\"FIND: Looking for {username}/{memo_number}/{memo_version}\") memoQry = Memo.query.filter_by(user_id=username,number=memo_number)", "# functions used to process the state # these function", "current_app from memos import db from memos.models.User import User from", "of State machine functions ################################################################################ @staticmethod def find(memo_id=None,username=None,memo_number=None,memo_version=None): if memo_id", "import json from datetime import datetime from flask import current_app", "memo.title,\\ num_files = 0,\\ user_id = memo.user_id,\\ memo_state = MemoState.Draft,\\", "= datetime.utcnow(),\\ create_date = datetime.utcnow(),\\ signers = '' ) new_memo.save()", "db.Column(db.Integer) # Memo Number version = db.Column(db.String) # A,B,..Z,AA,AB,...AZ,BA confidential", "access to self\"\"\" # if it is not confidential than", "###################################################################### @staticmethod def parse_reference(reference): parts = re.split(r'-',reference) if len(parts) ==", "if user == None: return None memolist = Memo.query.join(User).filter(Memo.memo_state==MemoState.Draft,User.username==user.username).order_by(Memo.action_date.desc()).paginate(page =", "[] for memo_ref in re.split(r'\\s|\\,|\\t|\\;|\\:',references): if memo_ref == '': continue", "is None: return False if delegate is None: delegate =", "True ################################################################################ # End of State machine functions 
################################################################################ @staticmethod", "are kept in the MemoSignature table status = MemoSignature.is_signer(self.id,signer) return", "which is a copy of all of the meta data", "= Memo.query.join(User).filter(User.username==username,Memo.number==memo_number)\\ .order_by(Memo.action_date.desc()).paginate(page = page,per_page=pagesize) elif username: memo_list = Memo.query.join(User).filter(User.username==username,Memo.memo_state", "parts[2] memo = Memo.find(username=username,memo_number=memo_number,memo_version=memo_version) current_app.logger.info(f\"Memo = {memo}\") if memo !=", "memo = Memo.query.join(User).filter(User.username==owner.username,Memo.number==memo_number).order_by(Memo.version.desc()).first() # create a new memo (i.e. not", "def unsign(self,signer=None,delegate=None): if not self.can_unsign(signer,delegate): return False MemoSignature.unsign(self.id,signer,delegate) MemoHistory.activity(memo=self,user=delegate,memo_activity=MemoActivity.Unsign) self.process_state(acting=delegate)", "= db.Column(db.DateTime) # when the memo was moved to obsolete", "db from memos.models.User import User from memos.models.MemoState import MemoState from", "None: return False if not self.user.is_delegate(delegate): return False if self.memo_state", "is an id assigned by the save new_memo.signers = memo._signers", "to this memo\"\"\" memo_list = MemoFile.query.filter_by(memo_id=self.id).all() return memo_list def saveJson(self):", "memo.references['ref_string'] # cannot be done until there is an id", "function is called when: # 1- a valid draft is", "is confidential so ... they must provide a username if", "been published\") MemoHistory.activity(memo=self,memo_activity=MemoActivity.Activate,user=acting) self.obsolete_previous(acting=acting) else: current_app.logger.info(f\"Signatures Still Required\") self.action_date =", "continue username = parts[0] memo_number = parts[1] memo_version = parts[2]", "True # signer function def reject(self,signer=None,delegate=None): current_app.logger.info(f\"signer = {signer} delegate={delegate}\")", "MemoSignature.get_signatures(user,signed=False) memolist = Memo.query.join(User).filter(Memo.memo_state==MemoState.Signoff,Memo.id.in_(msigs)).order_by(Memo.action_date.desc()).paginate(page = page,per_page=pagesize) current_app.logger.info(f\"Inbox for {user.username} =", "The last time anything happened create_date = db.Column(db.DateTime) # when", "sign(self,signer=None,delegate=None): current_app.logger.info(f\"signer = {signer} delegate={delegate}\") if not self.can_sign(signer,delegate): current_app.logger.info(\"NOT!!@ allowed", "0,\\ user_id = owner.username,\\ memo_state = MemoState.Draft,\\ action_date = datetime.utcnow(),\\", "if keywords != None: memo_list = Memo.query.filter(Memo.keywords.like(f\"%{keywords}%\")).order_by(Memo.action_date.desc()).paginate(page = page,per_page=pagesize) return", "be none\" if user == None: return None msigs =", "to a string and a list siglist = MemoSignature.get_signers(self) for", "################################################################################ # End of State machine functions ################################################################################ @staticmethod def", "self.can_obsolete(delegate=delegate): return False self.memo_state = MemoState.Obsolete self.action_date = datetime.utcnow() self.obsolete_date", "= datetime.utcnow(),\\ signers = '' ) new_memo.save() 
MemoHistory.activity(memo=new_memo,memo_activity=MemoActivity.Create,user=delegate) current_app.logger.info(f\"Creating new", "None if owner == None or delegate == None: return", "# signer function def sign(self,signer=None,delegate=None): current_app.logger.info(f\"signer = {signer} delegate={delegate}\") if", "self.notify_distribution(f\"memo {self.user.username}-{self.number}-{self.version} has been published\") MemoHistory.activity(memo=self,memo_activity=MemoActivity.Activate,user=acting) self.obsolete_previous(acting=acting) else: current_app.logger.info(f\"Signatures Still", "# The hidden list of references memo_state = db.Column(db.Enum(MemoState)) #", "memo _signers = db.Column(db.String(128),default='') # the hidden list of signer", "memos.models.MemoFile import MemoFile from memos.models.MemoSignature import MemoSignature from memos.models.MemoReference import", "parsed_ref = Memo.parse_reference(refs['valid_refs'][i]) user = User.find(username=parsed_ref[0]) MemoReference.add_ref(self.id,ref_user_id=user.username,ref_memo_number=parsed_ref[1],ref_memo_version=parsed_ref[2]) @property def backrefs(self):", "ARH do something better if user.username in re.split('\\s|\\,|\\t|\\;|\\:',self.distribution): return True", "MemoState.Active: memo.memo_state = MemoState.Obsolete MemoHistory.activity(memo=memo,memo_activity=MemoActivity.Obsolete,user=acting) memo.save() # This function is", "db.Column(db.String(128),default='') # the hidden list of signer usernames _references =", "of all of the meta data \"\"\" js = {}", "function will return None or a new Memo if the", "Memo.valid_references(references) for i in range(len(refs['valid_refs'])): parsed_ref = Memo.parse_reference(refs['valid_refs'][i]) user =", "def can_sign(self, signer=None, delegate=None): \"\"\"Can this memo be signed by", "MemoHistory.activity(memo=self,user=delegate,memo_activity=MemoActivity.Obsolete) self.save() return True # Owner Function def cancel(self,delegate=None): current_app.logger.info(f\"Cancel:", "== MemoState.Active)\\ .order_by(Memo.action_date.desc()).paginate(page = page,per_page=pagesize) return memo_list @staticmethod def search(title=None,keywords=None,page=1,pagesize=None):", "TODO: ARH def notify_signers(self,message): current_app.logger.info(F\"Notify signers {message}\") ################################################################################ # State", "unsign happens def process_state(self,acting=None): if self.memo_state == MemoState.Draft: if MemoSignature.status(self.id)", "= Memo.find(username=username,memo_number=memo_number,memo_version=memo_version) current_app.logger.info(f\"Memo = {memo}\") if memo != None and", "return None msigs = MemoSignature.get_signatures(user,signed=False) memolist = Memo.query.join(User).filter(Memo.memo_state==MemoState.Signoff,Memo.id.in_(msigs)).order_by(Memo.action_date.desc()).paginate(page = page,per_page=pagesize)", "for {signer.username} by {delegate.username}\") return True ################################################################################ # End of", "\"\"\" Create the JSON file which is a copy of", "the memo was moved to obsolete state (from active) user_id", "MemoState.Signoff: return False if not signer.is_delegate(delegate=delegate): return False # The", "# Owner Function @staticmethod def create_revise(owner=None,delegate=None,memo_number=None): \"\"\" This function will", "range(len(refs['valid_refs'])): parsed_ref = Memo.parse_reference(refs['valid_refs'][i]) user = User.find(username=parsed_ref[0]) 
MemoReference.add_ref(self.id,ref_user_id=user.username,ref_memo_number=parsed_ref[1],ref_memo_version=parsed_ref[2]) @property def", "MemoSignature from memos.models.MemoReference import MemoReference from memos.models.MemoHistory import MemoHistory from", "None: return None msigs = MemoSignature.get_signatures(user,signed=False) memolist = Memo.query.join(User).filter(Memo.memo_state==MemoState.Signoff,Memo.id.in_(msigs)).order_by(Memo.action_date.desc()).paginate(page =", "# The last time anything happened create_date = db.Column(db.DateTime) #", "moved to active state (from submitted) obsolete_date = db.Column(db.DateTime) #", "js['memo_state']=f\"{self.memo_state}\" js['keywords']= self.keywords js['signers']=self.signers['signers'] js['references']= self.references['ref_string'] js['files']=[] for file in", "re.split(r'\\s|\\,|\\t|\\;|\\:',references): if memo_ref == '': continue parts = Memo.parse_reference(memo_ref) if", "path = os.path.join(\"/static\",\"memos\",f\"{self.user_id}\",f\"{self.number}\",f\"{self.version}\") return path def get_files(self): \"\"\" Return a", "id assigned by the save new_memo.save() MemoHistory.activity(memo=new_memo,memo_activity=MemoActivity.Create,user=delegate) return new_memo #", "f\"{self}\" if not self.can_cancel(delegate=delegate): return False MemoFile.delete(self) # delete all", "true only author, signer, distribution can read distribution = db.Column(db.String(128),", "import shutil import json from datetime import datetime from flask", "and status['status'] def can_obsolete(self, delegate=None): \"\"\" Can this memo be", "you can reject.. even if you have already signed return", "current_app.logger.info(F\"Notify signers {message}\") ################################################################################ # State machine functions called by", "parts[1] memo_version = parts[2] memo = Memo.find(username=username,memo_number=memo_number,memo_version=memo_version) current_app.logger.info(f\"Memo = {memo}\")", "False if self.memo_state == MemoState.Active: return True return False def", "MemoState.Obsolete self.action_date = datetime.utcnow() self.obsolete_date = datetime.utcnow() MemoHistory.activity(memo=self,user=delegate,memo_activity=MemoActivity.Obsolete) self.save() return", "version of an existing memo) if memo_number == None or", "obsoleted \"\"\" if delegate is None: return False if not", "'': continue parts = Memo.parse_reference(memo_ref) if len(parts) > 3 or", "be obsoleted \"\"\" if delegate is None: return False if", "{self.distribution} {message}\") # TODO: ARH def notify_signers(self,message): current_app.logger.info(F\"Notify signers {message}\")", "@staticmethod def valid_references(references): current_app.logger.info(f'references ={references}') valid_memos = [] valid_refs =" ]
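To make the lifecycle above easier to follow in isolation, here is a minimal, self-contained sketch of the Draft -> Signoff -> Active -> Obsolete flow that process_state(), sign(), and obsolete() implement. It is not the project's API: TinyMemo, its State enum, and the pending-signer set are illustrative stand-ins for the Memo/MemoSignature machinery.

from enum import Enum, auto

class State(Enum):
    Draft = auto()
    Signoff = auto()
    Active = auto()
    Obsolete = auto()

class TinyMemo:
    """Illustrative only: mirrors the Draft -> Signoff -> Active -> Obsolete flow."""
    def __init__(self, signers):
        self.state = State.Draft
        self.pending = set(signers)   # signers who have not signed yet

    def submit(self):
        assert self.state == State.Draft
        # with no signers required, a draft publishes immediately (as in process_state)
        self.state = State.Active if not self.pending else State.Signoff

    def sign(self, signer):
        assert self.state == State.Signoff   # can_sign: only memos in signoff can be signed
        self.pending.discard(signer)
        if not self.pending:                 # all signatures collected -> publish
            self.state = State.Active

    def obsolete(self):
        assert self.state == State.Active    # can_obsolete: only active memos can be obsoleted
        self.state = State.Obsolete

memo = TinyMemo(signers={"alice", "bob"})
memo.submit()
memo.sign("alice")
memo.sign("bob")
assert memo.state == State.Active
memo.obsolete()
assert memo.state == State.Obsolete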
[ "\"fake-client-id\" settings.EDX_API_CLIENT_SECRET = \"fake-client-secret\" settings.EDX_API_ACCESS_TOKEN_URL = \"http://localhost/fake/access/token/url\" settings.EDX_API_URL = \"http://localhost/fake/api/url\"", "settings.EDX_API_ACCESS_TOKEN_URL = \"http://localhost/fake/access/token/url\" settings.EDX_API_URL = \"http://localhost/fake/api/url\" settings.MITX_BASE_URL = \"http://localhost/fake/base/url\" settings.MITX_ALT_URL", "as f: yield json.loads(f.read()) @pytest.fixture def non_mitx_course_data(): \"\"\"Catalog data fixture\"\"\"", "= \"fake-client-secret\" settings.OLL_API_ACCESS_TOKEN_URL = \"http://localhost/fake/access/token/url\" settings.OLL_API_URL = \"http://localhost/fake/api/url\" settings.OLL_BASE_URL =", "settings.EDX_API_CLIENT_SECRET = \"fake-client-secret\" settings.EDX_API_ACCESS_TOKEN_URL = \"http://localhost/fake/access/token/url\" settings.EDX_API_URL = \"http://localhost/fake/api/url\" settings.MITX_BASE_URL", "test fixtures\"\"\" import json import pytest @pytest.fixture(autouse=True) def mitx_settings(settings): \"\"\"Test", "settings.EDX_API_URL = \"http://localhost/fake/api/url\" settings.MITX_BASE_URL = \"http://localhost/fake/base/url\" settings.MITX_ALT_URL = \"http://localhost/fake/alt/url\" return", "= \"http://localhost/fake/alt/url\" return settings @pytest.fixture(autouse=True) def oll_settings(settings): \"\"\"Test settings for", "= \"http://localhost/fake/base/url\" settings.OLL_ALT_URL = \"http://localhost/fake/alt/url\" return settings @pytest.fixture def mitx_course_data():", "\"http://localhost/fake/api/url\" settings.OLL_BASE_URL = \"http://localhost/fake/base/url\" settings.OLL_ALT_URL = \"http://localhost/fake/alt/url\" return settings @pytest.fixture", "\"http://localhost/fake/access/token/url\" settings.OLL_API_URL = \"http://localhost/fake/api/url\" settings.OLL_BASE_URL = \"http://localhost/fake/base/url\" settings.OLL_ALT_URL = \"http://localhost/fake/alt/url\"", "settings.OLL_API_ACCESS_TOKEN_URL = \"http://localhost/fake/access/token/url\" settings.OLL_API_URL = \"http://localhost/fake/api/url\" settings.OLL_BASE_URL = \"http://localhost/fake/base/url\" settings.OLL_ALT_URL", "import\"\"\" settings.EDX_API_CLIENT_ID = \"fake-client-id\" settings.EDX_API_CLIENT_SECRET = \"fake-client-secret\" settings.EDX_API_ACCESS_TOKEN_URL = \"http://localhost/fake/access/token/url\"", "ETL test fixtures\"\"\" import json import pytest @pytest.fixture(autouse=True) def mitx_settings(settings):", "= \"http://localhost/fake/api/url\" settings.OLL_BASE_URL = \"http://localhost/fake/base/url\" settings.OLL_ALT_URL = \"http://localhost/fake/alt/url\" return settings", "\"\"\"Catalog data fixture\"\"\" with open(\"./test_json/test_mitx_course.json\", \"r\") as f: yield json.loads(f.read())", "import pytest @pytest.fixture(autouse=True) def mitx_settings(settings): \"\"\"Test settings for MITx import\"\"\"", "f: yield json.loads(f.read()) @pytest.fixture def non_mitx_course_data(): \"\"\"Catalog data fixture\"\"\" with", "oll_settings(settings): \"\"\"Test settings for MITx import\"\"\" settings.OLL_API_CLIENT_ID = \"fake-client-id\" settings.OLL_API_CLIENT_SECRET", "return settings @pytest.fixture(autouse=True) def oll_settings(settings): \"\"\"Test settings for MITx import\"\"\"", "settings.OLL_BASE_URL = \"http://localhost/fake/base/url\" settings.OLL_ALT_URL = \"http://localhost/fake/alt/url\" return settings @pytest.fixture def", "\"\"\"Test settings for MITx import\"\"\" settings.EDX_API_CLIENT_ID = \"fake-client-id\" 
settings.EDX_API_CLIENT_SECRET =", "MITx import\"\"\" settings.EDX_API_CLIENT_ID = \"fake-client-id\" settings.EDX_API_CLIENT_SECRET = \"fake-client-secret\" settings.EDX_API_ACCESS_TOKEN_URL =", "MITx import\"\"\" settings.OLL_API_CLIENT_ID = \"fake-client-id\" settings.OLL_API_CLIENT_SECRET = \"fake-client-secret\" settings.OLL_API_ACCESS_TOKEN_URL =", "settings.EDX_API_CLIENT_ID = \"fake-client-id\" settings.EDX_API_CLIENT_SECRET = \"fake-client-secret\" settings.EDX_API_ACCESS_TOKEN_URL = \"http://localhost/fake/access/token/url\" settings.EDX_API_URL", "for MITx import\"\"\" settings.OLL_API_CLIENT_ID = \"fake-client-id\" settings.OLL_API_CLIENT_SECRET = \"fake-client-secret\" settings.OLL_API_ACCESS_TOKEN_URL", "settings for MITx import\"\"\" settings.OLL_API_CLIENT_ID = \"fake-client-id\" settings.OLL_API_CLIENT_SECRET = \"fake-client-secret\"", "def mitx_course_data(): \"\"\"Catalog data fixture\"\"\" with open(\"./test_json/test_mitx_course.json\", \"r\") as f:", "= \"fake-client-id\" settings.EDX_API_CLIENT_SECRET = \"fake-client-secret\" settings.EDX_API_ACCESS_TOKEN_URL = \"http://localhost/fake/access/token/url\" settings.EDX_API_URL =", "\"http://localhost/fake/base/url\" settings.OLL_ALT_URL = \"http://localhost/fake/alt/url\" return settings @pytest.fixture def mitx_course_data(): \"\"\"Catalog", "\"fake-client-secret\" settings.EDX_API_ACCESS_TOKEN_URL = \"http://localhost/fake/access/token/url\" settings.EDX_API_URL = \"http://localhost/fake/api/url\" settings.MITX_BASE_URL = \"http://localhost/fake/base/url\"", "data fixture\"\"\" with open(\"./test_json/test_mitx_course.json\", \"r\") as f: yield json.loads(f.read()) @pytest.fixture", "json import pytest @pytest.fixture(autouse=True) def mitx_settings(settings): \"\"\"Test settings for MITx", "settings @pytest.fixture def mitx_course_data(): \"\"\"Catalog data fixture\"\"\" with open(\"./test_json/test_mitx_course.json\", \"r\")", "<filename>course_catalog/etl/conftest.py \"\"\"Common ETL test fixtures\"\"\" import json import pytest @pytest.fixture(autouse=True)", "json.loads(f.read()) @pytest.fixture def non_mitx_course_data(): \"\"\"Catalog data fixture\"\"\" with open(\"./test_json/test_non_mitx_course.json\", \"r\")", "import\"\"\" settings.OLL_API_CLIENT_ID = \"fake-client-id\" settings.OLL_API_CLIENT_SECRET = \"fake-client-secret\" settings.OLL_API_ACCESS_TOKEN_URL = \"http://localhost/fake/access/token/url\"", "@pytest.fixture(autouse=True) def oll_settings(settings): \"\"\"Test settings for MITx import\"\"\" settings.OLL_API_CLIENT_ID =", "\"r\") as f: yield json.loads(f.read()) @pytest.fixture def non_mitx_course_data(): \"\"\"Catalog data", "def oll_settings(settings): \"\"\"Test settings for MITx import\"\"\" settings.OLL_API_CLIENT_ID = \"fake-client-id\"", "settings.MITX_ALT_URL = \"http://localhost/fake/alt/url\" return settings @pytest.fixture(autouse=True) def oll_settings(settings): \"\"\"Test settings", "settings.OLL_ALT_URL = \"http://localhost/fake/alt/url\" return settings @pytest.fixture def mitx_course_data(): \"\"\"Catalog data", "with open(\"./test_json/test_mitx_course.json\", \"r\") as f: yield json.loads(f.read()) @pytest.fixture def non_mitx_course_data():", "def non_mitx_course_data(): \"\"\"Catalog data fixture\"\"\" with open(\"./test_json/test_non_mitx_course.json\", \"r\") as f:", "open(\"./test_json/test_mitx_course.json\", \"r\") as f: yield json.loads(f.read()) @pytest.fixture def non_mitx_course_data(): \"\"\"Catalog", "@pytest.fixture def mitx_course_data(): \"\"\"Catalog data 
fixture\"\"\" with open(\"./test_json/test_mitx_course.json\", \"r\") as", "def mitx_settings(settings): \"\"\"Test settings for MITx import\"\"\" settings.EDX_API_CLIENT_ID = \"fake-client-id\"", "= \"http://localhost/fake/api/url\" settings.MITX_BASE_URL = \"http://localhost/fake/base/url\" settings.MITX_ALT_URL = \"http://localhost/fake/alt/url\" return settings", "for MITx import\"\"\" settings.EDX_API_CLIENT_ID = \"fake-client-id\" settings.EDX_API_CLIENT_SECRET = \"fake-client-secret\" settings.EDX_API_ACCESS_TOKEN_URL", "\"\"\"Common ETL test fixtures\"\"\" import json import pytest @pytest.fixture(autouse=True) def", "\"\"\"Test settings for MITx import\"\"\" settings.OLL_API_CLIENT_ID = \"fake-client-id\" settings.OLL_API_CLIENT_SECRET =", "settings.MITX_BASE_URL = \"http://localhost/fake/base/url\" settings.MITX_ALT_URL = \"http://localhost/fake/alt/url\" return settings @pytest.fixture(autouse=True) def", "fixture\"\"\" with open(\"./test_json/test_mitx_course.json\", \"r\") as f: yield json.loads(f.read()) @pytest.fixture def", "fixtures\"\"\" import json import pytest @pytest.fixture(autouse=True) def mitx_settings(settings): \"\"\"Test settings", "settings.OLL_API_URL = \"http://localhost/fake/api/url\" settings.OLL_BASE_URL = \"http://localhost/fake/base/url\" settings.OLL_ALT_URL = \"http://localhost/fake/alt/url\" return", "settings.OLL_API_CLIENT_ID = \"fake-client-id\" settings.OLL_API_CLIENT_SECRET = \"fake-client-secret\" settings.OLL_API_ACCESS_TOKEN_URL = \"http://localhost/fake/access/token/url\" settings.OLL_API_URL", "settings @pytest.fixture(autouse=True) def oll_settings(settings): \"\"\"Test settings for MITx import\"\"\" settings.OLL_API_CLIENT_ID", "\"fake-client-secret\" settings.OLL_API_ACCESS_TOKEN_URL = \"http://localhost/fake/access/token/url\" settings.OLL_API_URL = \"http://localhost/fake/api/url\" settings.OLL_BASE_URL = \"http://localhost/fake/base/url\"", "= \"http://localhost/fake/access/token/url\" settings.EDX_API_URL = \"http://localhost/fake/api/url\" settings.MITX_BASE_URL = \"http://localhost/fake/base/url\" settings.MITX_ALT_URL =", "pytest @pytest.fixture(autouse=True) def mitx_settings(settings): \"\"\"Test settings for MITx import\"\"\" settings.EDX_API_CLIENT_ID", "\"http://localhost/fake/api/url\" settings.MITX_BASE_URL = \"http://localhost/fake/base/url\" settings.MITX_ALT_URL = \"http://localhost/fake/alt/url\" return settings @pytest.fixture(autouse=True)", "yield json.loads(f.read()) @pytest.fixture def non_mitx_course_data(): \"\"\"Catalog data fixture\"\"\" with open(\"./test_json/test_non_mitx_course.json\",", "non_mitx_course_data(): \"\"\"Catalog data fixture\"\"\" with open(\"./test_json/test_non_mitx_course.json\", \"r\") as f: yield", "\"http://localhost/fake/base/url\" settings.MITX_ALT_URL = \"http://localhost/fake/alt/url\" return settings @pytest.fixture(autouse=True) def oll_settings(settings): \"\"\"Test", "= \"http://localhost/fake/alt/url\" return settings @pytest.fixture def mitx_course_data(): \"\"\"Catalog data fixture\"\"\"", "\"http://localhost/fake/access/token/url\" settings.EDX_API_URL = \"http://localhost/fake/api/url\" settings.MITX_BASE_URL = \"http://localhost/fake/base/url\" settings.MITX_ALT_URL = \"http://localhost/fake/alt/url\"", "import json import pytest @pytest.fixture(autouse=True) def mitx_settings(settings): \"\"\"Test settings for", "= \"fake-client-id\" settings.OLL_API_CLIENT_SECRET = \"fake-client-secret\" settings.OLL_API_ACCESS_TOKEN_URL = \"http://localhost/fake/access/token/url\" 
settings.OLL_API_URL =", "mitx_course_data(): \"\"\"Catalog data fixture\"\"\" with open(\"./test_json/test_mitx_course.json\", \"r\") as f: yield", "= \"http://localhost/fake/access/token/url\" settings.OLL_API_URL = \"http://localhost/fake/api/url\" settings.OLL_BASE_URL = \"http://localhost/fake/base/url\" settings.OLL_ALT_URL =", "return settings @pytest.fixture def mitx_course_data(): \"\"\"Catalog data fixture\"\"\" with open(\"./test_json/test_mitx_course.json\",", "\"\"\"Catalog data fixture\"\"\" with open(\"./test_json/test_non_mitx_course.json\", \"r\") as f: yield json.loads(f.read())", "= \"http://localhost/fake/base/url\" settings.MITX_ALT_URL = \"http://localhost/fake/alt/url\" return settings @pytest.fixture(autouse=True) def oll_settings(settings):", "\"http://localhost/fake/alt/url\" return settings @pytest.fixture(autouse=True) def oll_settings(settings): \"\"\"Test settings for MITx", "@pytest.fixture(autouse=True) def mitx_settings(settings): \"\"\"Test settings for MITx import\"\"\" settings.EDX_API_CLIENT_ID =", "mitx_settings(settings): \"\"\"Test settings for MITx import\"\"\" settings.EDX_API_CLIENT_ID = \"fake-client-id\" settings.EDX_API_CLIENT_SECRET", "@pytest.fixture def non_mitx_course_data(): \"\"\"Catalog data fixture\"\"\" with open(\"./test_json/test_non_mitx_course.json\", \"r\") as", "\"http://localhost/fake/alt/url\" return settings @pytest.fixture def mitx_course_data(): \"\"\"Catalog data fixture\"\"\" with", "settings for MITx import\"\"\" settings.EDX_API_CLIENT_ID = \"fake-client-id\" settings.EDX_API_CLIENT_SECRET = \"fake-client-secret\"", "settings.OLL_API_CLIENT_SECRET = \"fake-client-secret\" settings.OLL_API_ACCESS_TOKEN_URL = \"http://localhost/fake/access/token/url\" settings.OLL_API_URL = \"http://localhost/fake/api/url\" settings.OLL_BASE_URL", "\"fake-client-id\" settings.OLL_API_CLIENT_SECRET = \"fake-client-secret\" settings.OLL_API_ACCESS_TOKEN_URL = \"http://localhost/fake/access/token/url\" settings.OLL_API_URL = \"http://localhost/fake/api/url\"", "= \"fake-client-secret\" settings.EDX_API_ACCESS_TOKEN_URL = \"http://localhost/fake/access/token/url\" settings.EDX_API_URL = \"http://localhost/fake/api/url\" settings.MITX_BASE_URL =" ]
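For context, a sketch of how these fixtures might be consumed from a test module in the same package. Only the fixture names above are real; the test functions and assertions are illustrative, and the `settings` fixture the two autouse fixtures depend on is assumed to come from pytest-django.

def test_mitx_settings_are_faked(mitx_settings):
    # mitx_settings is autouse, but naming it makes the dependency explicit
    assert mitx_settings.EDX_API_CLIENT_ID == "fake-client-id"
    assert mitx_settings.EDX_API_URL.startswith("http://localhost/")


def test_mitx_course_fixture_parses(mitx_course_data):
    # the fixture yields the parsed contents of test_json/test_mitx_course.json
    assert isinstance(mitx_course_data, (dict, list))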
[ "it a little more unique. :param cpxTmp: complex value to", "and set attributes. :param kwargs: a dictionary in the form", "disk fig.savefig(fname+\".png\", dpi=self.dpi, pad_inches=0.05, bbox_inches='tight') def julia(**kwargs): \"\"\" temp \"\"\"", "mirror is concatenate with the original to produce a new", "of iterations \"\"\" # Initialize numpy array of dimensions (size,", "cm.binary, cm.rainbow, cm.twilight_shifted, cm.plasma ] # Randomly chose one colormap", "if kwargs in not empty if kwargs is not None:", "is not empty there is some invalid keywords if kwargs:", "= kwargs.pop('size', 256) if 'dpi' in kwargs: self.dpi = kwargs.pop('dpi',", "ix in range(self.size): # Get the pixel position in the", "complex values cpxList = [ (-0.10, 0.650), (0.00, 0.80), (0.370,", "= 1. - np.sqrt(it/self.niter) # Fill the outpout array julia[ix][iy]", "-0.05), (0.37, 0.10), (0.355, 0.355) ] # Randomly choose one", "== \"__main__\": # execute only if run as a script", "yctr+0.5) return xrng, yrng def processJulia(self, cpxNum, xrng, yrng): \"\"\"", "PNG file to write on disk \"\"\" # List of", "height) :param dpi: dots per inch (default 300) \"\"\" #", "/ self.size * width + xrng[0] # Loop over y", "cm.Greens, cm.Purples, cm.hot, cm.inferno, cm.binary, cm.rainbow, cm.twilight_shifted, cm.plasma ] #", "(xctr-0.5, xctr+0.5) yrng = (yctr-0.5, yctr+0.5) return xrng, yrng def", "is defined as a square width = xrng[1] - xrng[0]", "value realPart = cpxTmp[0] * rsigma imagPart = cpxTmp[1] *", "pieces of fractals xctr = random.uniform(-1.0,1.0) yctr = random.uniform(-1.0,1.0) #", "xrng: range of values (min, max) for the x-axis :param", "__name__ == \"__main__\": # execute only if run as a", "if true the Julia set is normalized by its absolute", "# Process julia = self.processJulia(cpxNum, xrng, yrng) # Normalization if(self.norm):", "\"\"\" # Initialize Julia Set instance juliaInstance = JuliaSet() #", "# Disable axis plt.axis('off') if(show): plt.show() else: # Write on", "is `False` th eoutput image will be written as a", "= cpxTmp**2 + cpxNum # Increment iteration counter it +=", "3) if 'niter' in kwargs: self.niter = kwargs.pop('niter', 250) #", "# Get a value variation for for real and imaginary", "value variation for for real and imaginary parts # The", "# Horizontal mirroring and concatenate juliamirror = np.flip(julia, axis=1) julia", "= kwargs.pop('dpi', 300) if 'norm' in kwargs: self.norm = kwargs.pop('norm',", "300 self.norm = True self.mirror = False # Initialize process", "set is normalized by its absolute maximum value. :param show:", "slightly manipulate version of the input \"\"\" # Get the", "For more randomness, the target area is a random subset", "concatenate juliamirror = np.flip(julia, axis=1) julia = np.concatenate((julia, juliamirror), axis=1)", "random.uniform(-1.0,1.0) yctr = random.uniform(-1.0,1.0) # Extend around the center xrng", "kwargs: self.mirror = kwargs.pop('mirror', False) # Process related parameters if", "* width + xrng[0] # Loop over y range for", "it on disk as a png file. :param julia: the", "if 'dpi' in kwargs: self.dpi = kwargs.pop('dpi', 300) if 'norm'", "Random choice in a list of best complex values for", "little more unique. 
:param cpxTmp: complex value to modify :param", "isigma = random.uniform(0.98, 1.02) # Apply modification and return the", "return complex(realPart, imagPart) def getTargetArea(self): \"\"\" For more randomness, the", "1.5] :return xrng, yrng: tuples containing (xmin, xmax) and (ymin,", "the real part realPart = float(ix) / self.size * width", "] # Randomly choose one cpxTmp = random.choice(cpxList) # Manipulate", "x range for ix in range(self.size): # Get the pixel", "The possible variation range is fixed at +/- 2% to", "Initialize image related parameters self.size = 256 self.dpi = 300", "slightly to make it a little more unique. :param cpxTmp:", "self.getComplexValue() # Get the target area # For more randomness,", "imaginary parts # The possible variation range is fixed at", "axis=1) # Vertical mirroring and concatenate juliamirror = np.flip(julia, axis=0)", "= cpxTmp[1] * isigma * isign return complex(realPart, imagPart) def", "Initialize numpy array of dimensions (size, size) with zeros julia", "true the Julia set is normalized by its absolute maximum", "def __init__(self): \"\"\" Constructor of the JuliaSet class :param size:", "- xmin = ymax - ymin # Randomly choose the", "shade return julia def plotJuliaSet(self, julia, fname='juilaset-output', show=False): \"\"\" Plot", "variation for for real and imaginary parts # The possible", "always pieces of fractals xctr = random.uniform(-1.0,1.0) yctr = random.uniform(-1.0,1.0)", "on disk as a png file. :param julia: the Julia", ":param size: size in pixels (for both width and height)", "write on disk \"\"\" # Get a complex value among", "julia(**kwargs): \"\"\" temp \"\"\" # Initialize Julia Set instance juliaInstance", "cmap=cmapName) # Disable axis plt.axis('off') if(show): plt.show() else: # Write", "(-0.54, 0.54), (0.340, -0.05), (0.37, 0.10), (0.355, 0.355) ] #", "= 250 def param(self, **kwargs): \"\"\" Get parameters from input", "at +/- 2% to stay # In the neightborhood of", "(size, size) with zeros julia = np.ones((self.size, self.size), dtype=np.float32) #", "pad_inches=0.05, bbox_inches='tight') def julia(**kwargs): \"\"\" temp \"\"\" # Initialize Julia", "the complex plane # For the real part realPart =", "+ cpxNum # Increment iteration counter it += 1 #", "generator :param mirror: if True the julia is mirrored horizontally", "# Plot the image with a gaussian interpolation fig =", ":param cpxTmp: complex value to modify :param cpxNum: a slightly", "a random colormap using matplotlib self.plotJuliaSet(julia, show=show, fname=fname) def getComplexValue(self):", "polynomial cpxTmp = cpxTmp**2 + cpxNum # Increment iteration counter", "\"\"\" Run the Julia set generator :param mirror: if True", "the Julia set is normalized by its absolute maximum value.", "yrng: range of values (min, max) for the y-axis :param", "0.355) ] # Randomly choose one cpxTmp = random.choice(cpxList) #", "some invalid keywords if kwargs: print(\"{} are invalid keyword arguments!\".format(kwargs.keys()))", "range for ix in range(self.size): # Get the pixel position", "and return the new complex value realPart = cpxTmp[0] *", "target area is a random subset of a wide one", "parameters self.escrad = 3 self.niter = 250 def param(self, **kwargs):", "is fixed at +/- 2% to stay # In the", "<= self.escrad**2 and it < self.niter): # Quadratic polynomial cpxTmp", "Possible values are in [-1.0, 1.0] to stay in an", "to height) of the image since the # image is", "the output with a random colormap using matplotlib self.plotJuliaSet(julia, show=show,", "a random # 
subset of a wide one defined with", "value. :param show: if show is `False` th eoutput image", "\"\"\" # Get a complex value among a list of", "for the y-axis :param escrad: escape radius :param niter: maximum", "it += 1 # Calculate the shade (a cool thing", "random colormap using matplotlib self.plotJuliaSet(julia, show=show, fname=fname) def getComplexValue(self): \"\"\"", "1, 2) # Get a value variation for for real", "list of best Julia sets cpxNum = self.getComplexValue() # Get", ":param cpxNum: a slightly manipulate version of the input \"\"\"", "# Loop over x range for ix in range(self.size): #", "of values (min, max) for the y-axis :param escrad: escape", "write on disk \"\"\" # List of beautiful colormap for", "complex value \"\"\" # Define the list of best complex", "both width and height) :param dpi: dots per inch (default", "of best complex values cpxList = [ (-0.10, 0.650), (0.00,", "niter: maximum number of iterations \"\"\" # Initialize numpy array", "cpxList = [ (-0.10, 0.650), (0.00, 0.80), (0.370, 0.100), (0.355,", "xrng[1] - xrng[0] # xmax - xmin = ymax -", "complex cpxTmp = complex(realPart, imagPart) # Initialize iteration counter it", "2% to stay # In the neightborhood of the initial", "250 def param(self, **kwargs): \"\"\" Get parameters from input dictionary", "Loop over iterations while(np.abs(cpxTmp) <= self.escrad**2 and it < self.niter):", "there is some invalid keywords if kwargs: print(\"{} are invalid", "position in the complex plane # For the real part", "# xmax - xmin = ymax - ymin # Randomly", "# execute only if run as a script genJuliaSet =", "the Julia set generator :param mirror: if True the julia", "= random.uniform(0.98, 1.02) isigma = random.uniform(0.98, 1.02) # Apply modification", "randomness, the target area is a random subset of a", "# Vertical mirroring and concatenate juliamirror = np.flip(julia, axis=0) julia", "# Initialize Julia Set instance juliaInstance = JuliaSet() # If", "ymin # Randomly choose the sign of the shade #ssign", "Mirroring if(self.mirror): # Horizontal mirroring and concatenate juliamirror = np.flip(julia,", "possible variation range is fixed at +/- 2% to stay", "imagPart = cpxTmp[1] * isigma * isign return complex(realPart, imagPart)", "Write on disk fig.savefig(fname+\".png\", dpi=self.dpi, pad_inches=0.05, bbox_inches='tight') def julia(**kwargs): \"\"\"", "xrng, yrng: tuples containing (xmin, xmax) and (ymin, ymax) \"\"\"", "`False` th eoutput image will be written as a PNG", "matplotlib self.plotJuliaSet(julia, show=show, fname=fname) def getComplexValue(self): \"\"\" Random choice in", "kwargs: self.escrad = kwargs.pop('escrad', 3) if 'niter' in kwargs: self.niter", "show=False): \"\"\" Plot the output Julia set and show it", "def julia(**kwargs): \"\"\" temp \"\"\" # Initialize Julia Set instance", "for Julia sets (real, imag). :return cpxNum: a semi-random complex", "- np.sqrt(it/self.niter) # Fill the outpout array julia[ix][iy] = ssign", "outpout array julia[ix][iy] = ssign * shade return julia def", "return juliaInstance if __name__ == \"__main__\": # execute only if", "= np.concatenate((julia, juliamirror), axis=0) # Plot the output with a", "yrng: tuples containing (xmin, xmax) and (ymin, ymax) \"\"\" #", "imagPart) # Initialize iteration counter it = 0 # Loop", "new complex value realPart = cpxTmp[0] * rsigma imagPart =", "the base value slightly to make it a little more", "random subset of a wide one defined with x[-1.5, 1.5]", "value slightly to make it a little more unique. 
:param", "- xrng[0] # xmax - xmin = ymax - ymin", "cool thing find somewhere on the net) shade = 1.", "cpxTmp = cpxTmp**2 + cpxNum # Increment iteration counter it", "the Julia set for the given input parameters. :param cpxNum:", "cm.Purples, cm.hot, cm.inferno, cm.binary, cm.rainbow, cm.twilight_shifted, cm.plasma ] # Randomly", "Calculate the shade (a cool thing find somewhere on the", "if run as a script genJuliaSet = JuliaSet() genJuliaSet.param() genJuliaSet.run()", "2) # Get a value variation for for real and", "chose one colormap cmapName = random.choice(cmapList) # Plot the image", "in the complex plane # For the imaginary part imagPart", "the center of the target area # Possible values are", "more unique. :param cpxTmp: complex value to modify :param cpxNum:", "the complex plane # For the imaginary part imagPart =", "kwargs not empty update the attributes if kwargs is not", "# Loop over iterations while(np.abs(cpxTmp) <= self.escrad**2 and it <", "somewhere on the net) shade = 1. - np.sqrt(it/self.niter) #", "/ self.size * width + yrng[0] # Build the complex", "fig.set_size_inches(3., 3.) plt.imshow(julia, interpolation='gaussian', cmap=cmapName) # Disable axis plt.axis('off') if(show):", "will be written as a PNG file named `fname` :param", "**kwargs): \"\"\" Get parameters from input dictionary and set attributes.", "a semi-random complex value \"\"\" # Define the list of", "random.randrange(-1, 1, 2) ssign = -1. # Loop over x", "\"\"\" # Randomly choose the center of the target area", "input parameters. :param cpxNum: complex value acting as a seed", "class :param size: size in pixels (for both width and", "dimensions (size, size) with zeros julia = np.ones((self.size, self.size), dtype=np.float32)", "in kwargs: self.escrad = kwargs.pop('escrad', 3) if 'niter' in kwargs:", "cpxNum: a semi-random complex value \"\"\" # Define the list", "__init__(self): \"\"\" Constructor of the JuliaSet class :param size: size", "realPart = float(ix) / self.size * width + xrng[0] #", "+= 1 # Calculate the shade (a cool thing find", "the target area is a random subset of a wide", "xrng, yrng) # Normalization if(self.norm): julia /= np.amax(np.abs(julia)) # Mirroring", "modify :param cpxNum: a slightly manipulate version of the input", "3 self.niter = 250 def param(self, **kwargs): \"\"\" Get parameters", "if True the julia is mirrored horizontally and vertically; each", "# Image related parameters if 'size' in kwargs: self.size =", "inch (default 300) \"\"\" # Initialize image related parameters self.size", "produce a new image :param norm: if true the Julia", "self.twearkComplex(cpxTmp) return cpxNum def twearkComplex(self, cpxTmp): \"\"\" Manipulate the base", "np.amax(np.abs(julia)) # Mirroring if(self.mirror): # Horizontal mirroring and concatenate juliamirror", "True) if 'mirror' in kwargs: self.mirror = kwargs.pop('mirror', False) #", "# image is defined as a square width = xrng[1]", "* shade return julia def plotJuliaSet(self, julia, fname='juilaset-output', show=False): \"\"\"", "'escrad' in kwargs: self.escrad = kwargs.pop('escrad', 3) if 'niter' in", "area is a random subset of a wide one defined", "julia[ix][iy] = ssign * shade return julia def plotJuliaSet(self, julia,", "pixel position in the complex plane # For the real", "If kwargs is not empty there is some invalid keywords", "..., 'argN': value}` \"\"\" # Check if kwargs in not", "real and imaginary parts # The possible variation range is", "Get parameters from input dictionary and set attributes. 
:param kwargs:", "Normalization if(self.norm): julia /= np.amax(np.abs(julia)) # Mirroring if(self.mirror): # Horizontal", "show: if show is `False` th eoutput image will be", "complex plane # For the real part realPart = float(ix)", "a little more unique. :param cpxTmp: complex value to modify", "PNG file named `fname` :param fname: Name of the output", "imaginary parts isign = random.randrange(-1, 1, 2) # Get a", "vertically; each mirror is concatenate with the original to produce", "not None: # Image related parameters if 'size' in kwargs:", "in an # area where there are always pieces of", "more randomness, the target area is a random subset of", "the y-axis :param escrad: escape radius :param niter: maximum number", "the JuliaSet class :param size: size in pixels (for both", "complex(realPart, imagPart) # Initialize iteration counter it = 0 #", "3.) plt.imshow(julia, interpolation='gaussian', cmap=cmapName) # Disable axis plt.axis('off') if(show): plt.show()", "in pixels (for both width and height) :param dpi: dots", "return xrng, yrng def processJulia(self, cpxNum, xrng, yrng): \"\"\" Calculate", "value \"\"\" # Define the list of best complex values", "as a png file. :param julia: the Julia set :param", "width + yrng[0] # Build the complex cpxTmp = complex(realPart,", "more randomness, the target area is a random # subset", "0.100), (0.355, 0.355), (-0.54, 0.54), (0.340, -0.05), (0.37, 0.10), (0.355,", "Julia set :param show: if show is `False` th eoutput", ":param show: if show is `False` th eoutput image will", "value acting as a seed for the Julia set :param", "Fill the outpout array julia[ix][iy] = ssign * shade return", "plane # For the real part realPart = float(ix) /", "of a wide one defined with x[-1.5, 1.5] and #", "kwargs: self.norm = kwargs.pop('norm', True) if 'mirror' in kwargs: self.mirror", "= (xctr-0.5, xctr+0.5) yrng = (yctr-0.5, yctr+0.5) return xrng, yrng", "by its absolute maximum value. 
:param show: if show is", "axis=0) julia = np.concatenate((julia, juliamirror), axis=0) # Plot the output", "param(self, **kwargs): \"\"\" Get parameters from input dictionary and set", "class JuliaSet: def __init__(self): \"\"\" Constructor of the JuliaSet class", "a complex value among a list of best Julia sets", "cm.twilight_shifted, cm.plasma ] # Randomly chose one colormap cmapName =", "numpy array of dimensions (size, size) with zeros julia =", "the input \"\"\" # Get the signs for the imaginary", "disk \"\"\" # Get a complex value among a list", "getTargetArea(self): \"\"\" For more randomness, the target area is a", "one cpxTmp = random.choice(cpxList) # Manipulate the base value slightly", "cpxTmp[0] * rsigma imagPart = cpxTmp[1] * isigma * isign", "- ymin # Randomly choose the sign of the shade", "# For the imaginary part imagPart = float(iy) / self.size", "not empty update the attributes if kwargs is not None:", "target area # For more randomness, the target area is", "(for both width and height) :param dpi: dots per inch", "kwargs: self.niter = kwargs.pop('niter', 250) # If kwargs is not", "For the imaginary part imagPart = float(iy) / self.size *", "def run(self, show=False, fname='juilaset-output'): \"\"\" Run the Julia set generator", "cpxTmp = complex(realPart, imagPart) # Initialize iteration counter it =", "sets cpxNum = self.getComplexValue() # Get the target area #", "dpi: dots per inch (default 300) \"\"\" # Initialize image", "the image since the # image is defined as a", "yrng): \"\"\" Calculate the Julia set for the given input", "np import matplotlib.pyplot as plt import matplotlib.cm as cm import", "per inch (default 300) \"\"\" # Initialize image related parameters", "\"\"\" # Check if kwargs in not empty if kwargs", "Get the signs for the imaginary parts isign = random.randrange(-1,", "Loop over y range for iy in range(self.size): # Get", "as np import matplotlib.pyplot as plt import matplotlib.cm as cm", "around the center xrng = (xctr-0.5, xctr+0.5) yrng = (yctr-0.5,", "self.mirror = kwargs.pop('mirror', False) # Process related parameters if 'escrad'", "'niter' in kwargs: self.niter = kwargs.pop('niter', 250) # If kwargs", "since the # image is defined as a square width", "= 3 self.niter = 250 def param(self, **kwargs): \"\"\" Get", "`{'arg1':value, ..., 'argN': value}` \"\"\" # Check if kwargs in", "horizontally and vertically; each mirror is concatenate with the original", "In the neightborhood of the initial value rsigma = random.uniform(0.98,", "width + xrng[0] # Loop over y range for iy", "# Extend around the center xrng = (xctr-0.5, xctr+0.5) yrng", "position in the complex plane # For the imaginary part", "attributes if kwargs is not None: juliaInstance.param(**kwargs) return juliaInstance if", "# Initialize numpy array of dimensions (size, size) with zeros", "mirror: if True the julia is mirrored horizontally and vertically;", "< self.niter): # Quadratic polynomial cpxTmp = cpxTmp**2 + cpxNum", "size) with zeros julia = np.ones((self.size, self.size), dtype=np.float32) # Calculate", "# Process related parameters if 'escrad' in kwargs: self.escrad =", "and it < self.niter): # Quadratic polynomial cpxTmp = cpxTmp**2", "is normalized by its absolute maximum value. :param show: if", "to modify :param cpxNum: a slightly manipulate version of the", "absolute maximum value. 
:param show: if show is `False` th", "yrng def processJulia(self, cpxNum, xrng, yrng): \"\"\" Calculate the Julia", "x-axis :param yrng: range of values (min, max) for the", "plt.axis('off') if(show): plt.show() else: # Write on disk fig.savefig(fname+\".png\", dpi=self.dpi,", "and height) :param dpi: dots per inch (default 300) \"\"\"", "modification and return the new complex value realPart = cpxTmp[0]", "tuples containing (xmin, xmax) and (ymin, ymax) \"\"\" # Randomly", "set :param xrng: range of values (min, max) for the", "If kwargs not empty update the attributes if kwargs is", "def param(self, **kwargs): \"\"\" Get parameters from input dictionary and", "julia = np.concatenate((julia, juliamirror), axis=0) # Plot the output with", "area where there are always pieces of fractals xctr =", "a random subset of a wide one defined with x[-1.5,", "self.size = 256 self.dpi = 300 self.norm = True self.mirror", "x[-1.5, 1.5] and y[-1.5, 1.5] :return xrng, yrng: tuples containing", "iteration counter it = 0 # Loop over iterations while(np.abs(cpxTmp)", "return julia def plotJuliaSet(self, julia, fname='juilaset-output', show=False): \"\"\" Plot the", "\"\"\" For more randomness, the target area is a random", "given input parameters. :param cpxNum: complex value acting as a", "plt.gcf() fig.set_size_inches(3., 3.) plt.imshow(julia, interpolation='gaussian', cmap=cmapName) # Disable axis plt.axis('off')", "Julia set is normalized by its absolute maximum value. :param", "= random.uniform(0.98, 1.02) # Apply modification and return the new", "in the complex plane # For the real part realPart", "not None: juliaInstance.param(**kwargs) return juliaInstance if __name__ == \"__main__\": #", "isign return complex(realPart, imagPart) def getTargetArea(self): \"\"\" For more randomness,", "image :param norm: if true the Julia set is normalized", "isign = random.randrange(-1, 1, 2) # Get a value variation", "subset of a wide one defined with x[-1.5, 1.5] and", "stay in an # area where there are always pieces", "rsigma = random.uniform(0.98, 1.02) isigma = random.uniform(0.98, 1.02) # Apply", "the outpout array julia[ix][iy] = ssign * shade return julia", "initial value rsigma = random.uniform(0.98, 1.02) isigma = random.uniform(0.98, 1.02)", "= complex(realPart, imagPart) # Initialize iteration counter it = 0", "\"\"\" Plot the output Julia set and show it in", "= random.uniform(-1.0,1.0) yctr = random.uniform(-1.0,1.0) # Extend around the center", "the shade #ssign = random.randrange(-1, 1, 2) ssign = -1.", "semi-random complex value \"\"\" # Define the list of best", "= False # Initialize process related parameters self.escrad = 3", "'argN': value}` \"\"\" # Check if kwargs in not empty", "self.niter): # Quadratic polynomial cpxTmp = cpxTmp**2 + cpxNum #", "output Julia set and show it in matplotlib window or", "# Initialize iteration counter it = 0 # Loop over", "# In the neightborhood of the initial value rsigma =", "cm.plasma ] # Randomly chose one colormap cmapName = random.choice(cmapList)", "kwargs: a dictionary in the form `{'arg1':value, ..., 'argN': value}`", "be written as a PNG file named `fname` :param fname:", "shade #ssign = random.randrange(-1, 1, 2) ssign = -1. #", "Plot the output Julia set and show it in matplotlib", "stay # In the neightborhood of the initial value rsigma", "# If kwargs is not empty there is some invalid", "Randomly choose one cpxTmp = random.choice(cpxList) # Manipulate the base", "gaussian interpolation fig = plt.gcf() fig.set_size_inches(3., 3.) 
plt.imshow(julia, interpolation='gaussian', cmap=cmapName)", "image related parameters self.size = 256 self.dpi = 300 self.norm", "kwargs.pop('escrad', 3) if 'niter' in kwargs: self.niter = kwargs.pop('niter', 250)", "matplotlib.cm as cm import random class JuliaSet: def __init__(self): \"\"\"", "(0.00, 0.80), (0.370, 0.100), (0.355, 0.355), (-0.54, 0.54), (0.340, -0.05),", "1.02) isigma = random.uniform(0.98, 1.02) # Apply modification and return", "= random.choice(cpxList) # Manipulate the base value slightly to make", "# Loop over y range for iy in range(self.size): #", "defined as a square width = xrng[1] - xrng[0] #", "interpolation fig = plt.gcf() fig.set_size_inches(3., 3.) plt.imshow(julia, interpolation='gaussian', cmap=cmapName) #", "of the initial value rsigma = random.uniform(0.98, 1.02) isigma =", "1, 2) ssign = -1. # Loop over x range", ":param niter: maximum number of iterations \"\"\" # Initialize numpy", "the new complex value realPart = cpxTmp[0] * rsigma imagPart", "`fname` :param fname: Name of the output PNG file to", "the shade (a cool thing find somewhere on the net)", "th eoutput image will be written as a PNG file", "keywords if kwargs: print(\"{} are invalid keyword arguments!\".format(kwargs.keys())) def run(self,", "/= np.amax(np.abs(julia)) # Mirroring if(self.mirror): # Horizontal mirroring and concatenate", "Apply modification and return the new complex value realPart =", "file named `fname` :param fname: Name of the output PNG", "y[-1.5, 1.5] :return xrng, yrng: tuples containing (xmin, xmax) and", "as a seed for the Julia set :param xrng: range", "# Initialize image related parameters self.size = 256 self.dpi =", "cpxTmp**2 + cpxNum # Increment iteration counter it += 1", "sets cmapList = [ cm.Blues, cm.Greens, cm.Purples, cm.hot, cm.inferno, cm.binary,", "Julia sets cmapList = [ cm.Blues, cm.Greens, cm.Purples, cm.hot, cm.inferno,", "if kwargs is not None: # Image related parameters if", "area # Possible values are in [-1.0, 1.0] to stay", "output PNG file to write on disk \"\"\" # List", "# Randomly choose the center of the target area #", "random class JuliaSet: def __init__(self): \"\"\" Constructor of the JuliaSet", "self.size), dtype=np.float32) # Calculate the width (equal to height) of", "related parameters self.escrad = 3 self.niter = 250 def param(self,", "#ssign = random.randrange(-1, 1, 2) ssign = -1. # Loop", "# Normalization if(self.norm): julia /= np.amax(np.abs(julia)) # Mirroring if(self.mirror): #", "cpxTmp): \"\"\" Manipulate the base value slightly to make it", "axis=0) # Plot the output with a random colormap using", "normalized by its absolute maximum value. 
:param show: if show", ":param norm: if true the Julia set is normalized by", "juliaInstance = JuliaSet() # If kwargs not empty update the", "julia /= np.amax(np.abs(julia)) # Mirroring if(self.mirror): # Horizontal mirroring and", "For more randomness, the target area is a random #", "(default 300) \"\"\" # Initialize image related parameters self.size =", "Define the list of best complex values cpxList = [", "acting as a seed for the Julia set :param xrng:", "to write on disk \"\"\" # List of beautiful colormap", "if __name__ == \"__main__\": # execute only if run as", "empty if kwargs is not None: # Image related parameters", "def plotJuliaSet(self, julia, fname='juilaset-output', show=False): \"\"\" Plot the output Julia", "self.processJulia(cpxNum, xrng, yrng) # Normalization if(self.norm): julia /= np.amax(np.abs(julia)) #", "is a random subset of a wide one defined with", "and y[-1.5, 1.5] :return xrng, yrng: tuples containing (xmin, xmax)", "cpxNum: complex value acting as a seed for the Julia", "0 # Loop over iterations while(np.abs(cpxTmp) <= self.escrad**2 and it", "# area where there are always pieces of fractals xctr", "the Julia set :param show: if show is `False` th", "the list of best complex values cpxList = [ (-0.10,", "[ cm.Blues, cm.Greens, cm.Purples, cm.hot, cm.inferno, cm.binary, cm.rainbow, cm.twilight_shifted, cm.plasma", "if 'niter' in kwargs: self.niter = kwargs.pop('niter', 250) # If", "of best complex values for Julia sets (real, imag). :return", "Get a value variation for for real and imaginary parts", "if 'size' in kwargs: self.size = kwargs.pop('size', 256) if 'dpi'", "a seed for the Julia set :param xrng: range of", "it = 0 # Loop over iterations while(np.abs(cpxTmp) <= self.escrad**2", "over iterations while(np.abs(cpxTmp) <= self.escrad**2 and it < self.niter): #", "best Julia sets cpxNum = self.getComplexValue() # Get the target", "set for the given input parameters. :param cpxNum: complex value", "execute only if run as a script genJuliaSet = JuliaSet()", "# Write on disk fig.savefig(fname+\".png\", dpi=self.dpi, pad_inches=0.05, bbox_inches='tight') def julia(**kwargs):", "the form `{'arg1':value, ..., 'argN': value}` \"\"\" # Check if", "kwargs: print(\"{} are invalid keyword arguments!\".format(kwargs.keys())) def run(self, show=False, fname='juilaset-output'):", "with a gaussian interpolation fig = plt.gcf() fig.set_size_inches(3., 3.) plt.imshow(julia,", "(-0.10, 0.650), (0.00, 0.80), (0.370, 0.100), (0.355, 0.355), (-0.54, 0.54),", "defined with x[-1.5, 1.5] and # y[-1.5, 1.5] xrng, yrng", "counter it = 0 # Loop over iterations while(np.abs(cpxTmp) <=", "# Define the list of best complex values cpxList =", "0.10), (0.355, 0.355) ] # Randomly choose one cpxTmp =", "Loop over x range for ix in range(self.size): # Get", "plane # For the imaginary part imagPart = float(iy) /", "counter it += 1 # Calculate the shade (a cool", "def processJulia(self, cpxNum, xrng, yrng): \"\"\" Calculate the Julia set", "of best Julia sets cpxNum = self.getComplexValue() # Get the", "ssign * shade return julia def plotJuliaSet(self, julia, fname='juilaset-output', show=False):", "image since the # image is defined as a square", "on disk \"\"\" # List of beautiful colormap for Julia", "parts isign = random.randrange(-1, 1, 2) # Get a value", "variation range is fixed at +/- 2% to stay #", "write it on disk as a png file. 
:param julia:", "# List of beautiful colormap for Julia sets cmapList =", "Get the target area # For more randomness, the target", "a dictionary in the form `{'arg1':value, ..., 'argN': value}` \"\"\"", "of the image since the # image is defined as", "JuliaSet() # If kwargs not empty update the attributes if", "a little more unique cpxNum = self.twearkComplex(cpxTmp) return cpxNum def", "xmin = ymax - ymin # Randomly choose the sign", "julia is mirrored horizontally and vertically; each mirror is concatenate", "if 'escrad' in kwargs: self.escrad = kwargs.pop('escrad', 3) if 'niter'", "xrng[0] # xmax - xmin = ymax - ymin #", ":param fname: Name of the output PNG file to write", "plt.show() else: # Write on disk fig.savefig(fname+\".png\", dpi=self.dpi, pad_inches=0.05, bbox_inches='tight')", "the neightborhood of the initial value rsigma = random.uniform(0.98, 1.02)", "a list of best complex values for Julia sets (real,", "parameters. :param cpxNum: complex value acting as a seed for", "for Julia sets cmapList = [ cm.Blues, cm.Greens, cm.Purples, cm.hot,", "one colormap cmapName = random.choice(cmapList) # Plot the image with", "the signs for the imaginary parts isign = random.randrange(-1, 1,", "(equal to height) of the image since the # image", "original to produce a new image :param norm: if true", "output with a random colormap using matplotlib self.plotJuliaSet(julia, show=show, fname=fname)", "Set instance juliaInstance = JuliaSet() # If kwargs not empty", "file to write on disk \"\"\" # Get a complex", "xrng, yrng def processJulia(self, cpxNum, xrng, yrng): \"\"\" Calculate the", "\"\"\" # Define the list of best complex values cpxList", "a square width = xrng[1] - xrng[0] # xmax -", "the image with a gaussian interpolation fig = plt.gcf() fig.set_size_inches(3.,", "cpxTmp[1] * isigma * isign return complex(realPart, imagPart) def getTargetArea(self):", ":param cpxNum: complex value acting as a seed for the", "the original to produce a new image :param norm: if", "little more unique cpxNum = self.twearkComplex(cpxTmp) return cpxNum def twearkComplex(self,", "a png file. :param julia: the Julia set :param show:", "random.choice(cmapList) # Plot the image with a gaussian interpolation fig", "a wide one defined with x[-1.5, 1.5] and y[-1.5, 1.5]", "the sign of the shade #ssign = random.randrange(-1, 1, 2)", "the pixel position in the complex plane # For the", "in matplotlib window or write it on disk as a", "of the target area # Possible values are in [-1.0,", "for the x-axis :param yrng: range of values (min, max)", "np.flip(julia, axis=1) julia = np.concatenate((julia, juliamirror), axis=1) # Vertical mirroring", "1. 
- np.sqrt(it/self.niter) # Fill the outpout array julia[ix][iy] =", "image with a gaussian interpolation fig = plt.gcf() fig.set_size_inches(3., 3.)", "concatenate juliamirror = np.flip(julia, axis=0) julia = np.concatenate((julia, juliamirror), axis=0)", "(0.355, 0.355) ] # Randomly choose one cpxTmp = random.choice(cpxList)", ":param yrng: range of values (min, max) for the y-axis", "if(self.mirror): # Horizontal mirroring and concatenate juliamirror = np.flip(julia, axis=1)", "import matplotlib.pyplot as plt import matplotlib.cm as cm import random", "False) # Process related parameters if 'escrad' in kwargs: self.escrad", "base value slightly to make it a little more unique", "over y range for iy in range(self.size): # Get the", "random.randrange(-1, 1, 2) # Get a value variation for for", "PNG file to write on disk \"\"\" # Get a", "to stay in an # area where there are always", "for the given input parameters. :param cpxNum: complex value acting", "self.niter = 250 def param(self, **kwargs): \"\"\" Get parameters from", "dtype=np.float32) # Calculate the width (equal to height) of the", "= self.getComplexValue() # Get the target area # For more", "colormap using matplotlib self.plotJuliaSet(julia, show=show, fname=fname) def getComplexValue(self): \"\"\" Random", "Julia set and show it in matplotlib window or write", "# Increment iteration counter it += 1 # Calculate the", "area # For more randomness, the target area is a", "iterations while(np.abs(cpxTmp) <= self.escrad**2 and it < self.niter): # Quadratic", "attributes. :param kwargs: a dictionary in the form `{'arg1':value, ...,", "with x[-1.5, 1.5] and y[-1.5, 1.5] :return xrng, yrng: tuples", "value to modify :param cpxNum: a slightly manipulate version of", "instance juliaInstance = JuliaSet() # If kwargs not empty update", "the target area is a random # subset of a", "width = xrng[1] - xrng[0] # xmax - xmin =", "[ (-0.10, 0.650), (0.00, 0.80), (0.370, 0.100), (0.355, 0.355), (-0.54,", "size in pixels (for both width and height) :param dpi:", "wide one defined with x[-1.5, 1.5] and # y[-1.5, 1.5]", "more unique cpxNum = self.twearkComplex(cpxTmp) return cpxNum def twearkComplex(self, cpxTmp):", "+/- 2% to stay # In the neightborhood of the", "JuliaSet class :param size: size in pixels (for both width", "self.escrad = kwargs.pop('escrad', 3) if 'niter' in kwargs: self.niter =", "width (equal to height) of the image since the #", "def getTargetArea(self): \"\"\" For more randomness, the target area is", "among a list of best Julia sets cpxNum = self.getComplexValue()", "# Randomly choose one cpxTmp = random.choice(cpxList) # Manipulate the", "neightborhood of the initial value rsigma = random.uniform(0.98, 1.02) isigma", "values (min, max) for the y-axis :param escrad: escape radius", "image is defined as a square width = xrng[1] -", "in kwargs: self.mirror = kwargs.pop('mirror', False) # Process related parameters", "# subset of a wide one defined with x[-1.5, 1.5]", "cmapName = random.choice(cmapList) # Plot the image with a gaussian", "y range for iy in range(self.size): # Get the pixel", "is not None: juliaInstance.param(**kwargs) return juliaInstance if __name__ == \"__main__\":", ":return xrng, yrng: tuples containing (xmin, xmax) and (ymin, ymax)", "new image :param norm: if true the Julia set is", "= cpxTmp[0] * rsigma imagPart = cpxTmp[1] * isigma *", "(a cool thing find somewhere on the net) shade =", "Randomly chose one colormap cmapName = random.choice(cmapList) # Plot the", "are invalid keyword 
arguments!\".format(kwargs.keys())) def run(self, show=False, fname='juilaset-output'): \"\"\" Run", "list of best complex values cpxList = [ (-0.10, 0.650),", "0.355), (-0.54, 0.54), (0.340, -0.05), (0.37, 0.10), (0.355, 0.355) ]", "(min, max) for the y-axis :param escrad: escape radius :param", "iterations \"\"\" # Initialize numpy array of dimensions (size, size)", "find somewhere on the net) shade = 1. - np.sqrt(it/self.niter)", "List of beautiful colormap for Julia sets cmapList = [", "Calculate the width (equal to height) of the image since", "choice in a list of best complex values for Julia", "Image related parameters if 'size' in kwargs: self.size = kwargs.pop('size',", "if kwargs is not None: juliaInstance.param(**kwargs) return juliaInstance if __name__", "cpxNum, xrng, yrng): \"\"\" Calculate the Julia set for the", "and concatenate juliamirror = np.flip(julia, axis=1) julia = np.concatenate((julia, juliamirror),", "(ymin, ymax) \"\"\" # Randomly choose the center of the", "there are always pieces of fractals xctr = random.uniform(-1.0,1.0) yctr", "the imaginary part imagPart = float(iy) / self.size * width", "of the output PNG file to write on disk \"\"\"", "kwargs.pop('mirror', False) # Process related parameters if 'escrad' in kwargs:", "JuliaSet: def __init__(self): \"\"\" Constructor of the JuliaSet class :param", "the width (equal to height) of the image since the", "# Plot the output with a random colormap using matplotlib", "ssign = -1. # Loop over x range for ix", "julia, fname='juilaset-output', show=False): \"\"\" Plot the output Julia set and", "Check if kwargs in not empty if kwargs is not", "True the julia is mirrored horizontally and vertically; each mirror", "return the new complex value realPart = cpxTmp[0] * rsigma", "choose the sign of the shade #ssign = random.randrange(-1, 1,", "xmax - xmin = ymax - ymin # Randomly choose", "shade (a cool thing find somewhere on the net) shade", ":param escrad: escape radius :param niter: maximum number of iterations", "for real and imaginary parts # The possible variation range", "kwargs.pop('dpi', 300) if 'norm' in kwargs: self.norm = kwargs.pop('norm', True)", "-1. # Loop over x range for ix in range(self.size):", "Plot the output with a random colormap using matplotlib self.plotJuliaSet(julia,", "= [ cm.Blues, cm.Greens, cm.Purples, cm.hot, cm.inferno, cm.binary, cm.rainbow, cm.twilight_shifted,", "# Quadratic polynomial cpxTmp = cpxTmp**2 + cpxNum # Increment", "to make it a little more unique. 
:param cpxTmp: complex", "= ssign * shade return julia def plotJuliaSet(self, julia, fname='juilaset-output',", "keyword arguments!\".format(kwargs.keys())) def run(self, show=False, fname='juilaset-output'): \"\"\" Run the Julia", "choose one cpxTmp = random.choice(cpxList) # Manipulate the base value", "as a PNG file named `fname` :param fname: Name of", "= random.randrange(-1, 1, 2) # Get a value variation for", "sign of the shade #ssign = random.randrange(-1, 1, 2) ssign", "temp \"\"\" # Initialize Julia Set instance juliaInstance = JuliaSet()", "= JuliaSet() # If kwargs not empty update the attributes", "julia = np.concatenate((julia, juliamirror), axis=1) # Vertical mirroring and concatenate", "named `fname` :param fname: Name of the output PNG file", "fname='juilaset-output', show=False): \"\"\" Plot the output Julia set and show", "only if run as a script genJuliaSet = JuliaSet() genJuliaSet.param()", "complex plane # For the imaginary part imagPart = float(iy)", "in a list of best complex values for Julia sets", "process related parameters self.escrad = 3 self.niter = 250 def", "maximum value. :param show: if show is `False` th eoutput", "parameters from input dictionary and set attributes. :param kwargs: a", "cpxNum: a slightly manipulate version of the input \"\"\" #", "show is `False` th eoutput image will be written as", "xrng, yrng = self.getTargetArea() # Process julia = self.processJulia(cpxNum, xrng,", "pixels (for both width and height) :param dpi: dots per", "of the input \"\"\" # Get the signs for the", "imagPart) def getTargetArea(self): \"\"\" For more randomness, the target area", "# For the real part realPart = float(ix) / self.size", "getComplexValue(self): \"\"\" Random choice in a list of best complex", "self.escrad**2 and it < self.niter): # Quadratic polynomial cpxTmp =", "random.uniform(-1.0,1.0) # Extend around the center xrng = (xctr-0.5, xctr+0.5)", "juliaInstance.param(**kwargs) return juliaInstance if __name__ == \"__main__\": # execute only", "make it a little more unique cpxNum = self.twearkComplex(cpxTmp) return", "dictionary and set attributes. :param kwargs: a dictionary in the", "def twearkComplex(self, cpxTmp): \"\"\" Manipulate the base value slightly to", "invalid keyword arguments!\".format(kwargs.keys())) def run(self, show=False, fname='juilaset-output'): \"\"\" Run the", "= kwargs.pop('niter', 250) # If kwargs is not empty there", "of beautiful colormap for Julia sets cmapList = [ cm.Blues,", "colormap for Julia sets cmapList = [ cm.Blues, cm.Greens, cm.Purples,", "2) ssign = -1. # Loop over x range for", "For the real part realPart = float(ix) / self.size *", "fname=fname) def getComplexValue(self): \"\"\" Random choice in a list of", "if show is `False` th eoutput image will be written", "fig = plt.gcf() fig.set_size_inches(3., 3.) 
plt.imshow(julia, interpolation='gaussian', cmap=cmapName) # Disable", "cm.rainbow, cm.twilight_shifted, cm.plasma ] # Randomly chose one colormap cmapName", "show=show, fname=fname) def getComplexValue(self): \"\"\" Random choice in a list", "] # Randomly chose one colormap cmapName = random.choice(cmapList) #", "cm import random class JuliaSet: def __init__(self): \"\"\" Constructor of", "values (min, max) for the x-axis :param yrng: range of", "Julia sets cpxNum = self.getComplexValue() # Get the target area", "numpy as np import matplotlib.pyplot as plt import matplotlib.cm as", "kwargs: self.dpi = kwargs.pop('dpi', 300) if 'norm' in kwargs: self.norm", "mirrored horizontally and vertically; each mirror is concatenate with the", "1 # Calculate the shade (a cool thing find somewhere", "Initialize iteration counter it = 0 # Loop over iterations", "# Randomly choose the sign of the shade #ssign =", "# Manipulate the base value slightly to make it a", "= plt.gcf() fig.set_size_inches(3., 3.) plt.imshow(julia, interpolation='gaussian', cmap=cmapName) # Disable axis", "fig.savefig(fname+\".png\", dpi=self.dpi, pad_inches=0.05, bbox_inches='tight') def julia(**kwargs): \"\"\" temp \"\"\" #", "part realPart = float(ix) / self.size * width + xrng[0]", "(yctr-0.5, yctr+0.5) return xrng, yrng def processJulia(self, cpxNum, xrng, yrng):", "list of best complex values for Julia sets (real, imag).", "in not empty if kwargs is not None: # Image", "to make it a little more unique cpxNum = self.twearkComplex(cpxTmp)", "cpxTmp: complex value to modify :param cpxNum: a slightly manipulate", "None: # Image related parameters if 'size' in kwargs: self.size", "and imaginary parts # The possible variation range is fixed", "float(ix) / self.size * width + xrng[0] # Loop over", "if(show): plt.show() else: # Write on disk fig.savefig(fname+\".png\", dpi=self.dpi, pad_inches=0.05,", "else: # Write on disk fig.savefig(fname+\".png\", dpi=self.dpi, pad_inches=0.05, bbox_inches='tight') def", "concatenate with the original to produce a new image :param", "dictionary in the form `{'arg1':value, ..., 'argN': value}` \"\"\" #", "on disk fig.savefig(fname+\".png\", dpi=self.dpi, pad_inches=0.05, bbox_inches='tight') def julia(**kwargs): \"\"\" temp", "of values (min, max) for the x-axis :param yrng: range", "self.size * width + xrng[0] # Loop over y range", "escrad: escape radius :param niter: maximum number of iterations \"\"\"", "width and height) :param dpi: dots per inch (default 300)", "Julia set :param xrng: range of values (min, max) for", "for iy in range(self.size): # Get the pixel position in", "mirroring and concatenate juliamirror = np.flip(julia, axis=1) julia = np.concatenate((julia,", "signs for the imaginary parts isign = random.randrange(-1, 1, 2)", "part imagPart = float(iy) / self.size * width + yrng[0]", "# Calculate the shade (a cool thing find somewhere on", "complex value realPart = cpxTmp[0] * rsigma imagPart = cpxTmp[1]", "Process julia = self.processJulia(cpxNum, xrng, yrng) # Normalization if(self.norm): julia", "kwargs.pop('size', 256) if 'dpi' in kwargs: self.dpi = kwargs.pop('dpi', 300)", "julia def plotJuliaSet(self, julia, fname='juilaset-output', show=False): \"\"\" Plot the output", "cm.inferno, cm.binary, cm.rainbow, cm.twilight_shifted, cm.plasma ] # Randomly chose one", "where there are always pieces of fractals xctr = random.uniform(-1.0,1.0)", "set :param show: if show is `False` th eoutput image", "square width = xrng[1] - xrng[0] # xmax - xmin", "= random.uniform(-1.0,1.0) # 
Extend around the center xrng = (xctr-0.5,", "self.size = kwargs.pop('size', 256) if 'dpi' in kwargs: self.dpi =", "Name of the output PNG file to write on disk", "1.02) # Apply modification and return the new complex value", "in [-1.0, 1.0] to stay in an # area where", "iy in range(self.size): # Get the pixel position in the", "Initialize Julia Set instance juliaInstance = JuliaSet() # If kwargs", "disk \"\"\" # List of beautiful colormap for Julia sets", "def getComplexValue(self): \"\"\" Random choice in a list of best", "1.5] and # y[-1.5, 1.5] xrng, yrng = self.getTargetArea() #", "manipulate version of the input \"\"\" # Get the signs", "0.80), (0.370, 0.100), (0.355, 0.355), (-0.54, 0.54), (0.340, -0.05), (0.37,", "Calculate the Julia set for the given input parameters. :param", "self.dpi = kwargs.pop('dpi', 300) if 'norm' in kwargs: self.norm =", "choose the center of the target area # Possible values", "yrng) # Normalization if(self.norm): julia /= np.amax(np.abs(julia)) # Mirroring if(self.mirror):", "height) of the image since the # image is defined", "in kwargs: self.dpi = kwargs.pop('dpi', 300) if 'norm' in kwargs:", "if kwargs: print(\"{} are invalid keyword arguments!\".format(kwargs.keys())) def run(self, show=False,", "a PNG file named `fname` :param fname: Name of the", "Process related parameters if 'escrad' in kwargs: self.escrad = kwargs.pop('escrad',", "1.5] and y[-1.5, 1.5] :return xrng, yrng: tuples containing (xmin,", "# y[-1.5, 1.5] xrng, yrng = self.getTargetArea() # Process julia", "kwargs is not None: # Image related parameters if 'size'", "Increment iteration counter it += 1 # Calculate the shade", "the output PNG file to write on disk \"\"\" #", "* width + yrng[0] # Build the complex cpxTmp =", "matplotlib window or write it on disk as a png", "= 0 # Loop over iterations while(np.abs(cpxTmp) <= self.escrad**2 and", "import random class JuliaSet: def __init__(self): \"\"\" Constructor of the", "Julia Set instance juliaInstance = JuliaSet() # If kwargs not", "an # area where there are always pieces of fractals", "form `{'arg1':value, ..., 'argN': value}` \"\"\" # Check if kwargs", "written as a PNG file named `fname` :param fname: Name", "set attributes. :param kwargs: a dictionary in the form `{'arg1':value,", "of the shade #ssign = random.randrange(-1, 1, 2) ssign =", "= random.choice(cmapList) # Plot the image with a gaussian interpolation", "self.dpi = 300 self.norm = True self.mirror = False #", "'norm' in kwargs: self.norm = kwargs.pop('norm', True) if 'mirror' in", "= kwargs.pop('mirror', False) # Process related parameters if 'escrad' in", "y[-1.5, 1.5] xrng, yrng = self.getTargetArea() # Process julia =", "# If kwargs not empty update the attributes if kwargs", "250) # If kwargs is not empty there is some", "each mirror is concatenate with the original to produce a", "= np.flip(julia, axis=1) julia = np.concatenate((julia, juliamirror), axis=1) # Vertical", "xctr+0.5) yrng = (yctr-0.5, yctr+0.5) return xrng, yrng def processJulia(self,", "make it a little more unique. 
:param cpxTmp: complex value", "range(self.size): # Get the pixel position in the complex plane", "show it in matplotlib window or write it on disk", "value rsigma = random.uniform(0.98, 1.02) isigma = random.uniform(0.98, 1.02) #", "file to write on disk \"\"\" # List of beautiful", "(min, max) for the x-axis :param yrng: range of values", "for for real and imaginary parts # The possible variation", ":param mirror: if True the julia is mirrored horizontally and", "# The possible variation range is fixed at +/- 2%", "in kwargs: self.size = kwargs.pop('size', 256) if 'dpi' in kwargs:", "xrng, yrng): \"\"\" Calculate the Julia set for the given", "self.escrad = 3 self.niter = 250 def param(self, **kwargs): \"\"\"", "yrng = (yctr-0.5, yctr+0.5) return xrng, yrng def processJulia(self, cpxNum,", "axis plt.axis('off') if(show): plt.show() else: # Write on disk fig.savefig(fname+\".png\",", "the complex cpxTmp = complex(realPart, imagPart) # Initialize iteration counter", "file. :param julia: the Julia set :param show: if show", "its absolute maximum value. :param show: if show is `False`", "= -1. # Loop over x range for ix in", "values for Julia sets (real, imag). :return cpxNum: a semi-random", "unique cpxNum = self.twearkComplex(cpxTmp) return cpxNum def twearkComplex(self, cpxTmp): \"\"\"", "the imaginary parts isign = random.randrange(-1, 1, 2) # Get", "300) \"\"\" # Initialize image related parameters self.size = 256", "invalid keywords if kwargs: print(\"{} are invalid keyword arguments!\".format(kwargs.keys())) def", "# Initialize process related parameters self.escrad = 3 self.niter =", "target area # Possible values are in [-1.0, 1.0] to", "if(self.norm): julia /= np.amax(np.abs(julia)) # Mirroring if(self.mirror): # Horizontal mirroring", "to write on disk \"\"\" # Get a complex value", "a value variation for for real and imaginary parts #", "import numpy as np import matplotlib.pyplot as plt import matplotlib.cm", "range of values (min, max) for the y-axis :param escrad:", "# Get the pixel position in the complex plane #", "it a little more unique cpxNum = self.twearkComplex(cpxTmp) return cpxNum", "[-1.0, 1.0] to stay in an # area where there", "seed for the Julia set :param xrng: range of values", "rsigma imagPart = cpxTmp[1] * isigma * isign return complex(realPart,", "value among a list of best Julia sets cpxNum =", "number of iterations \"\"\" # Initialize numpy array of dimensions", "run(self, show=False, fname='juilaset-output'): \"\"\" Run the Julia set generator :param", "Build the complex cpxTmp = complex(realPart, imagPart) # Initialize iteration", "= 300 self.norm = True self.mirror = False # Initialize", "net) shade = 1. - np.sqrt(it/self.niter) # Fill the outpout", "True self.mirror = False # Initialize process related parameters self.escrad", "= True self.mirror = False # Initialize process related parameters", "to produce a new image :param norm: if true the", "'mirror' in kwargs: self.mirror = kwargs.pop('mirror', False) # Process related", "ymax) \"\"\" # Randomly choose the center of the target", "it in matplotlib window or write it on disk as", "\"\"\" Get parameters from input dictionary and set attributes. 
:param", "in range(self.size): # Get the pixel position in the complex", "cm.Blues, cm.Greens, cm.Purples, cm.hot, cm.inferno, cm.binary, cm.rainbow, cm.twilight_shifted, cm.plasma ]", "is mirrored horizontally and vertically; each mirror is concatenate with", "random.choice(cpxList) # Manipulate the base value slightly to make it", "containing (xmin, xmax) and (ymin, ymax) \"\"\" # Randomly choose", "center of the target area # Possible values are in", "beautiful colormap for Julia sets cmapList = [ cm.Blues, cm.Greens,", "value}` \"\"\" # Check if kwargs in not empty if", "with a random colormap using matplotlib self.plotJuliaSet(julia, show=show, fname=fname) def", "output PNG file to write on disk \"\"\" # Get", "complex value to modify :param cpxNum: a slightly manipulate version", "Extend around the center xrng = (xctr-0.5, xctr+0.5) yrng =", "julia = np.ones((self.size, self.size), dtype=np.float32) # Calculate the width (equal", "related parameters if 'escrad' in kwargs: self.escrad = kwargs.pop('escrad', 3)", ":param julia: the Julia set :param show: if show is", "kwargs is not None: juliaInstance.param(**kwargs) return juliaInstance if __name__ ==", "Julia sets (real, imag). :return cpxNum: a semi-random complex value", "plotJuliaSet(self, julia, fname='juilaset-output', show=False): \"\"\" Plot the output Julia set", "image will be written as a PNG file named `fname`", "max) for the x-axis :param yrng: range of values (min,", "if 'norm' in kwargs: self.norm = kwargs.pop('norm', True) if 'mirror'", "max) for the y-axis :param escrad: escape radius :param niter:", "a wide one defined with x[-1.5, 1.5] and # y[-1.5,", "real part realPart = float(ix) / self.size * width +", "shade = 1. - np.sqrt(it/self.niter) # Fill the outpout array", "range of values (min, max) for the x-axis :param yrng:", "if 'mirror' in kwargs: self.mirror = kwargs.pop('mirror', False) # Process", "print(\"{} are invalid keyword arguments!\".format(kwargs.keys())) def run(self, show=False, fname='juilaset-output'): \"\"\"", "axis=1) julia = np.concatenate((julia, juliamirror), axis=1) # Vertical mirroring and", "the Julia set :param xrng: range of values (min, max)", "plt.imshow(julia, interpolation='gaussian', cmap=cmapName) # Disable axis plt.axis('off') if(show): plt.show() else:", "Get a complex value among a list of best Julia", "juliamirror = np.flip(julia, axis=1) julia = np.concatenate((julia, juliamirror), axis=1) #", "sets (real, imag). 
:return cpxNum: a semi-random complex value \"\"\"", "return cpxNum def twearkComplex(self, cpxTmp): \"\"\" Manipulate the base value", "self.getTargetArea() # Process julia = self.processJulia(cpxNum, xrng, yrng) # Normalization", "\"\"\" # Initialize numpy array of dimensions (size, size) with", "cpxTmp = random.choice(cpxList) # Manipulate the base value slightly to", "center xrng = (xctr-0.5, xctr+0.5) yrng = (yctr-0.5, yctr+0.5) return", "zeros julia = np.ones((self.size, self.size), dtype=np.float32) # Calculate the width", "\"\"\" Calculate the Julia set for the given input parameters.", "300) if 'norm' in kwargs: self.norm = kwargs.pop('norm', True) if", "kwargs.pop('norm', True) if 'mirror' in kwargs: self.mirror = kwargs.pop('mirror', False)", "parameters if 'escrad' in kwargs: self.escrad = kwargs.pop('escrad', 3) if", "of a wide one defined with x[-1.5, 1.5] and y[-1.5,", "np.ones((self.size, self.size), dtype=np.float32) # Calculate the width (equal to height)", "juliamirror = np.flip(julia, axis=0) julia = np.concatenate((julia, juliamirror), axis=0) #", "best complex values cpxList = [ (-0.10, 0.650), (0.00, 0.80),", ":return cpxNum: a semi-random complex value \"\"\" # Define the", "kwargs in not empty if kwargs is not None: #", "Randomly choose the sign of the shade #ssign = random.randrange(-1,", "# Build the complex cpxTmp = complex(realPart, imagPart) # Initialize", "False # Initialize process related parameters self.escrad = 3 self.niter", "is a random # subset of a wide one defined", "values cpxList = [ (-0.10, 0.650), (0.00, 0.80), (0.370, 0.100),", "\"\"\" Manipulate the base value slightly to make it a", "of the JuliaSet class :param size: size in pixels (for", "as cm import random class JuliaSet: def __init__(self): \"\"\" Constructor", "on the net) shade = 1. - np.sqrt(it/self.niter) # Fill", "Julia set generator :param mirror: if True the julia is", "'size' in kwargs: self.size = kwargs.pop('size', 256) if 'dpi' in", "# Get a complex value among a list of best", "from input dictionary and set attributes. :param kwargs: a dictionary", "area is a random # subset of a wide one", "arguments!\".format(kwargs.keys())) def run(self, show=False, fname='juilaset-output'): \"\"\" Run the Julia set", "window or write it on disk as a png file.", "maximum number of iterations \"\"\" # Initialize numpy array of", "'dpi' in kwargs: self.dpi = kwargs.pop('dpi', 300) if 'norm' in", "* rsigma imagPart = cpxTmp[1] * isigma * isign return", "xrng[0] # Loop over y range for iy in range(self.size):", "with x[-1.5, 1.5] and # y[-1.5, 1.5] xrng, yrng =", "(0.340, -0.05), (0.37, 0.10), (0.355, 0.355) ] # Randomly choose", "xrng = (xctr-0.5, xctr+0.5) yrng = (yctr-0.5, yctr+0.5) return xrng,", "array julia[ix][iy] = ssign * shade return julia def plotJuliaSet(self,", "# For more randomness, the target area is a random", "cpxNum = self.getComplexValue() # Get the target area # For", "related parameters self.size = 256 self.dpi = 300 self.norm =", "julia: the Julia set :param show: if show is `False`", "self.norm = kwargs.pop('norm', True) if 'mirror' in kwargs: self.mirror =", "range is fixed at +/- 2% to stay # In", "\"\"\" temp \"\"\" # Initialize Julia Set instance juliaInstance =", "(real, imag). 
:return cpxNum: a semi-random complex value \"\"\" #", "juliaInstance if __name__ == \"__main__\": # execute only if run", "the julia is mirrored horizontally and vertically; each mirror is", "ymax - ymin # Randomly choose the sign of the", "Disable axis plt.axis('off') if(show): plt.show() else: # Write on disk", "related parameters if 'size' in kwargs: self.size = kwargs.pop('size', 256)", "wide one defined with x[-1.5, 1.5] and y[-1.5, 1.5] :return", "import matplotlib.cm as cm import random class JuliaSet: def __init__(self):", "# Calculate the width (equal to height) of the image", "Constructor of the JuliaSet class :param size: size in pixels", "array of dimensions (size, size) with zeros julia = np.ones((self.size,", "a list of best Julia sets cpxNum = self.getComplexValue() #", "bbox_inches='tight') def julia(**kwargs): \"\"\" temp \"\"\" # Initialize Julia Set", "on disk \"\"\" # Get a complex value among a", "disk as a png file. :param julia: the Julia set", "= self.processJulia(cpxNum, xrng, yrng) # Normalization if(self.norm): julia /= np.amax(np.abs(julia))", "\"__main__\": # execute only if run as a script genJuliaSet", "mirroring and concatenate juliamirror = np.flip(julia, axis=0) julia = np.concatenate((julia,", "= self.twearkComplex(cpxTmp) return cpxNum def twearkComplex(self, cpxTmp): \"\"\" Manipulate the", "0.650), (0.00, 0.80), (0.370, 0.100), (0.355, 0.355), (-0.54, 0.54), (0.340,", "using matplotlib self.plotJuliaSet(julia, show=show, fname=fname) def getComplexValue(self): \"\"\" Random choice", "0.54), (0.340, -0.05), (0.37, 0.10), (0.355, 0.355) ] # Randomly", "size: size in pixels (for both width and height) :param", "= self.getTargetArea() # Process julia = self.processJulia(cpxNum, xrng, yrng) #", "(0.37, 0.10), (0.355, 0.355) ] # Randomly choose one cpxTmp", "a gaussian interpolation fig = plt.gcf() fig.set_size_inches(3., 3.) 
plt.imshow(julia, interpolation='gaussian',", "version of the input \"\"\" # Get the signs for", "y-axis :param escrad: escape radius :param niter: maximum number of", "kwargs is not empty there is some invalid keywords if", "256) if 'dpi' in kwargs: self.dpi = kwargs.pop('dpi', 300) if", "colormap cmapName = random.choice(cmapList) # Plot the image with a", "complex value among a list of best Julia sets cpxNum", "# Possible values are in [-1.0, 1.0] to stay in", "cpxNum def twearkComplex(self, cpxTmp): \"\"\" Manipulate the base value slightly", "= np.ones((self.size, self.size), dtype=np.float32) # Calculate the width (equal to", "# Check if kwargs in not empty if kwargs is", "= np.flip(julia, axis=0) julia = np.concatenate((julia, juliamirror), axis=0) # Plot", "self.niter = kwargs.pop('niter', 250) # If kwargs is not empty", "np.flip(julia, axis=0) julia = np.concatenate((julia, juliamirror), axis=0) # Plot the", "random.uniform(0.98, 1.02) isigma = random.uniform(0.98, 1.02) # Apply modification and", "= ymax - ymin # Randomly choose the sign of", "imaginary part imagPart = float(iy) / self.size * width +", "Quadratic polynomial cpxTmp = cpxTmp**2 + cpxNum # Increment iteration", "= float(iy) / self.size * width + yrng[0] # Build", "# Mirroring if(self.mirror): # Horizontal mirroring and concatenate juliamirror =", "yctr = random.uniform(-1.0,1.0) # Extend around the center xrng =", "the initial value rsigma = random.uniform(0.98, 1.02) isigma = random.uniform(0.98,", "parameters self.size = 256 self.dpi = 300 self.norm = True", "value slightly to make it a little more unique cpxNum", "imagPart = float(iy) / self.size * width + yrng[0] #", "in kwargs: self.norm = kwargs.pop('norm', True) if 'mirror' in kwargs:", "= [ (-0.10, 0.650), (0.00, 0.80), (0.370, 0.100), (0.355, 0.355),", "to stay # In the neightborhood of the initial value", "not empty if kwargs is not None: # Image related", "show=False, fname='juilaset-output'): \"\"\" Run the Julia set generator :param mirror:", "= kwargs.pop('norm', True) if 'mirror' in kwargs: self.mirror = kwargs.pop('mirror',", "# Apply modification and return the new complex value realPart", "are in [-1.0, 1.0] to stay in an # area", "fractals xctr = random.uniform(-1.0,1.0) yctr = random.uniform(-1.0,1.0) # Extend around", "yrng = self.getTargetArea() # Process julia = self.processJulia(cpxNum, xrng, yrng)", "the output Julia set and show it in matplotlib window", "slightly to make it a little more unique cpxNum =", "the target area # Possible values are in [-1.0, 1.0]", "dots per inch (default 300) \"\"\" # Initialize image related", "empty update the attributes if kwargs is not None: juliaInstance.param(**kwargs)", "parts # The possible variation range is fixed at +/-", "update the attributes if kwargs is not None: juliaInstance.param(**kwargs) return", "= random.randrange(-1, 1, 2) ssign = -1. 
# Loop over", "is some invalid keywords if kwargs: print(\"{} are invalid keyword", "kwargs: self.size = kwargs.pop('size', 256) if 'dpi' in kwargs: self.dpi", "the target area # For more randomness, the target area", "(0.355, 0.355), (-0.54, 0.54), (0.340, -0.05), (0.37, 0.10), (0.355, 0.355)", "range for iy in range(self.size): # Get the pixel position", "# Get the target area # For more randomness, the", "self.mirror = False # Initialize process related parameters self.escrad =", "is concatenate with the original to produce a new image", "\"\"\" Constructor of the JuliaSet class :param size: size in", "plt import matplotlib.cm as cm import random class JuliaSet: def", "with zeros julia = np.ones((self.size, self.size), dtype=np.float32) # Calculate the", "np.concatenate((julia, juliamirror), axis=0) # Plot the output with a random", "parameters if 'size' in kwargs: self.size = kwargs.pop('size', 256) if", "and (ymin, ymax) \"\"\" # Randomly choose the center of", "fixed at +/- 2% to stay # In the neightborhood", "defined with x[-1.5, 1.5] and y[-1.5, 1.5] :return xrng, yrng:", "xctr = random.uniform(-1.0,1.0) yctr = random.uniform(-1.0,1.0) # Extend around the", "twearkComplex(self, cpxTmp): \"\"\" Manipulate the base value slightly to make", ":param kwargs: a dictionary in the form `{'arg1':value, ..., 'argN':", "cpxNum # Increment iteration counter it += 1 # Calculate", "Manipulate the base value slightly to make it a little", "set and show it in matplotlib window or write it", "(0.370, 0.100), (0.355, 0.355), (-0.54, 0.54), (0.340, -0.05), (0.37, 0.10),", "one defined with x[-1.5, 1.5] and y[-1.5, 1.5] :return xrng,", "np.concatenate((julia, juliamirror), axis=1) # Vertical mirroring and concatenate juliamirror =", "set generator :param mirror: if True the julia is mirrored", "in the form `{'arg1':value, ..., 'argN': value}` \"\"\" # Check", "juliamirror), axis=0) # Plot the output with a random colormap", "+ xrng[0] # Loop over y range for iy in", "None: juliaInstance.param(**kwargs) return juliaInstance if __name__ == \"__main__\": # execute", "eoutput image will be written as a PNG file named", "of dimensions (size, size) with zeros julia = np.ones((self.size, self.size),", "Randomly choose the center of the target area # Possible", "self.norm = True self.mirror = False # Initialize process related", "it < self.niter): # Quadratic polynomial cpxTmp = cpxTmp**2 +", "isigma * isign return complex(realPart, imagPart) def getTargetArea(self): \"\"\" For", "is not None: # Image related parameters if 'size' in", "cpxNum = self.twearkComplex(cpxTmp) return cpxNum def twearkComplex(self, cpxTmp): \"\"\" Manipulate", "Vertical mirroring and concatenate juliamirror = np.flip(julia, axis=0) julia =", "xmax) and (ymin, ymax) \"\"\" # Randomly choose the center", "\"\"\" # Initialize image related parameters self.size = 256 self.dpi", "kwargs.pop('niter', 250) # If kwargs is not empty there is", "\"\"\" Random choice in a list of best complex values", "1.5] xrng, yrng = self.getTargetArea() # Process julia = self.processJulia(cpxNum,", "or write it on disk as a png file. :param", "* isign return complex(realPart, imagPart) def getTargetArea(self): \"\"\" For more", "Run the Julia set generator :param mirror: if True the", "\"\"\" # Get the signs for the imaginary parts isign", "\"\"\" # List of beautiful colormap for Julia sets cmapList", "the given input parameters. 
:param cpxNum: complex value acting as", "self.plotJuliaSet(julia, show=show, fname=fname) def getComplexValue(self): \"\"\" Random choice in a", "np.sqrt(it/self.niter) # Fill the outpout array julia[ix][iy] = ssign *", "float(iy) / self.size * width + yrng[0] # Build the", "= 256 self.dpi = 300 self.norm = True self.mirror =", "while(np.abs(cpxTmp) <= self.escrad**2 and it < self.niter): # Quadratic polynomial", "yrng[0] # Build the complex cpxTmp = complex(realPart, imagPart) #", "complex value acting as a seed for the Julia set", "and vertically; each mirror is concatenate with the original to", "and show it in matplotlib window or write it on", "random # subset of a wide one defined with x[-1.5,", "cmapList = [ cm.Blues, cm.Greens, cm.Purples, cm.hot, cm.inferno, cm.binary, cm.rainbow,", "input dictionary and set attributes. :param kwargs: a dictionary in", "# Get the signs for the imaginary parts isign =", "for the Julia set :param xrng: range of values (min,", "# Fill the outpout array julia[ix][iy] = ssign * shade", "for ix in range(self.size): # Get the pixel position in", "random.uniform(0.98, 1.02) # Apply modification and return the new complex", "of fractals xctr = random.uniform(-1.0,1.0) yctr = random.uniform(-1.0,1.0) # Extend", "x[-1.5, 1.5] and # y[-1.5, 1.5] xrng, yrng = self.getTargetArea()", "imag). :return cpxNum: a semi-random complex value \"\"\" # Define", "the center xrng = (xctr-0.5, xctr+0.5) yrng = (yctr-0.5, yctr+0.5)", "# Randomly chose one colormap cmapName = random.choice(cmapList) # Plot", "processJulia(self, cpxNum, xrng, yrng): \"\"\" Calculate the Julia set for", "iteration counter it += 1 # Calculate the shade (a", "radius :param niter: maximum number of iterations \"\"\" # Initialize", "the net) shade = 1. - np.sqrt(it/self.niter) # Fill the", "values are in [-1.0, 1.0] to stay in an #", "256 self.dpi = 300 self.norm = True self.mirror = False", "and concatenate juliamirror = np.flip(julia, axis=0) julia = np.concatenate((julia, juliamirror),", "the # image is defined as a square width =", "unique. :param cpxTmp: complex value to modify :param cpxNum: a", "png file. :param julia: the Julia set :param show: if", "complex values for Julia sets (real, imag). :return cpxNum: a", "for the imaginary parts isign = random.randrange(-1, 1, 2) #", "complex(realPart, imagPart) def getTargetArea(self): \"\"\" For more randomness, the target", "are always pieces of fractals xctr = random.uniform(-1.0,1.0) yctr =", "cm.hot, cm.inferno, cm.binary, cm.rainbow, cm.twilight_shifted, cm.plasma ] # Randomly chose", "self.size * width + yrng[0] # Build the complex cpxTmp", "+ yrng[0] # Build the complex cpxTmp = complex(realPart, imagPart)", "norm: if true the Julia set is normalized by its", "Initialize process related parameters self.escrad = 3 self.niter = 250", "in kwargs: self.niter = kwargs.pop('niter', 250) # If kwargs is", "randomness, the target area is a random # subset of", "a slightly manipulate version of the input \"\"\" # Get", "Horizontal mirroring and concatenate juliamirror = np.flip(julia, axis=1) julia =", ":param xrng: range of values (min, max) for the x-axis", "Plot the image with a gaussian interpolation fig = plt.gcf()", "= np.concatenate((julia, juliamirror), axis=1) # Vertical mirroring and concatenate juliamirror", "realPart = cpxTmp[0] * rsigma imagPart = cpxTmp[1] * isigma", "thing find somewhere on the net) shade = 1. 
-", "pixel position in the complex plane # For the imaginary", "= kwargs.pop('escrad', 3) if 'niter' in kwargs: self.niter = kwargs.pop('niter',", "= (yctr-0.5, yctr+0.5) return xrng, yrng def processJulia(self, cpxNum, xrng,", "target area is a random # subset of a wide", "the x-axis :param yrng: range of values (min, max) for", "and # y[-1.5, 1.5] xrng, yrng = self.getTargetArea() # Process", "best complex values for Julia sets (real, imag). :return cpxNum:", "julia = self.processJulia(cpxNum, xrng, yrng) # Normalization if(self.norm): julia /=", "(xmin, xmax) and (ymin, ymax) \"\"\" # Randomly choose the", "= xrng[1] - xrng[0] # xmax - xmin = ymax", "1.0] to stay in an # area where there are", "over x range for ix in range(self.size): # Get the", "fname='juilaset-output'): \"\"\" Run the Julia set generator :param mirror: if", "a new image :param norm: if true the Julia set", "dpi=self.dpi, pad_inches=0.05, bbox_inches='tight') def julia(**kwargs): \"\"\" temp \"\"\" # Initialize", "fname: Name of the output PNG file to write on", "escape radius :param niter: maximum number of iterations \"\"\" #", "as plt import matplotlib.cm as cm import random class JuliaSet:", "matplotlib.pyplot as plt import matplotlib.cm as cm import random class", "Get the pixel position in the complex plane # For", "as a square width = xrng[1] - xrng[0] # xmax", "Julia set for the given input parameters. :param cpxNum: complex", "= float(ix) / self.size * width + xrng[0] # Loop", "with the original to produce a new image :param norm:", "base value slightly to make it a little more unique.", "not empty there is some invalid keywords if kwargs: print(\"{}", "juliamirror), axis=1) # Vertical mirroring and concatenate juliamirror = np.flip(julia,", "interpolation='gaussian', cmap=cmapName) # Disable axis plt.axis('off') if(show): plt.show() else: #", "input \"\"\" # Get the signs for the imaginary parts", "the attributes if kwargs is not None: juliaInstance.param(**kwargs) return juliaInstance", ":param dpi: dots per inch (default 300) \"\"\" # Initialize", "* isigma * isign return complex(realPart, imagPart) def getTargetArea(self): \"\"\"", "one defined with x[-1.5, 1.5] and # y[-1.5, 1.5] xrng,", "empty there is some invalid keywords if kwargs: print(\"{} are" ]
[ "cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) gray = cv2.equalizeHist(gray) faces = faceCascade.detectMultiScale(frame) for (x,", "h, x : x + w] roi_color = frame[y :", "= cv2.VideoCapture(0) print(vs) while True: ret, frame = vs.read() if", "# eye_detection.py - detect eyes using webcam # tutorial: https://www.roytuts.com/real-time-eye-detection-in-webcam-using-python-3/", "(x, y, w, h) in faces: roi_gray = gray[y :", "+ eh), (0, 0, 255), 2) cv2.imshow(\"Video\", frame) key =", "2) cv2.imshow(\"Video\", frame) key = cv2.waitKey(1) & 0xFF if key", "y + h, x : x + w] eyes =", "w] roi_color = frame[y : y + h, x :", "is None: break gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) gray = cv2.equalizeHist(gray)", "webcam # tutorial: https://www.roytuts.com/real-time-eye-detection-in-webcam-using-python-3/ import cv2 import math import numpy", "to the webcam # try: vs = cv2.VideoCapture(0) print(vs) while", "# tutorial: https://www.roytuts.com/real-time-eye-detection-in-webcam-using-python-3/ import cv2 import math import numpy as", "gray = cv2.equalizeHist(gray) faces = faceCascade.detectMultiScale(frame) for (x, y, w,", "cv2.CascadeClassifier(\"haarcascade_frontalface_alt.xml\") eyeCascade = cv2.CascadeClassifier(\"haarcascade_eye.xml\") # grab the reference to the", "faceCascade.detectMultiScale(frame) for (x, y, w, h) in faces: roi_gray =", ": x + w] roi_color = frame[y : y +", "vs = cv2.VideoCapture(0) print(vs) while True: ret, frame = vs.read()", "0xFF if key == ord(\"q\") or key == 27: break", "cv2.CascadeClassifier(\"haarcascade_eye.xml\") # grab the reference to the webcam # try:", "for (ex, ey, ew, eh) in eyes: cv2.rectangle(roi_color, (ex, ey),", "as np def main(): faceCascade = cv2.CascadeClassifier(\"haarcascade_frontalface_alt.xml\") eyeCascade = cv2.CascadeClassifier(\"haarcascade_eye.xml\")", "+ w] roi_color = frame[y : y + h, x", "vs.read() if frame is None: break gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)", "= cv2.CascadeClassifier(\"haarcascade_eye.xml\") # grab the reference to the webcam #", "= vs.read() if frame is None: break gray = cv2.cvtColor(frame,", "key == ord(\"q\") or key == 27: break cv2.destroyAllWindows() if", "y + h, x : x + w] roi_color =", "# grab the reference to the webcam # try: vs", "cv2.waitKey(1) & 0xFF if key == ord(\"q\") or key ==", "while True: ret, frame = vs.read() if frame is None:", "+ w] eyes = eyeCascade.detectMultiScale(roi_gray) for (ex, ey, ew, eh)", "key == 27: break cv2.destroyAllWindows() if __name__ == \"__main__\": main()", "None: break gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) gray = cv2.equalizeHist(gray) faces", "x + w] roi_color = frame[y : y + h,", "def main(): faceCascade = cv2.CascadeClassifier(\"haarcascade_frontalface_alt.xml\") eyeCascade = cv2.CascadeClassifier(\"haarcascade_eye.xml\") # grab", "255), 2) cv2.imshow(\"Video\", frame) key = cv2.waitKey(1) & 0xFF if", "cv2.VideoCapture(0) print(vs) while True: ret, frame = vs.read() if frame", "w] eyes = eyeCascade.detectMultiScale(roi_gray) for (ex, ey, ew, eh) in", "cv2.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh), (0,", "ew, ey + eh), (0, 0, 255), 2) cv2.imshow(\"Video\", frame)", "import math import numpy as np def main(): faceCascade =", "webcam # try: vs = cv2.VideoCapture(0) print(vs) while True: ret,", "gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) gray = cv2.equalizeHist(gray) faces = faceCascade.detectMultiScale(frame)", "roi_gray = gray[y : y + h, x : x", "key = cv2.waitKey(1) & 0xFF if key == ord(\"q\") or", "cv2.equalizeHist(gray) faces = 
faceCascade.detectMultiScale(frame) for (x, y, w, h) in", "h) in faces: roi_gray = gray[y : y + h,", "ret, frame = vs.read() if frame is None: break gray", "frame is None: break gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) gray =", "= gray[y : y + h, x : x +", "= frame[y : y + h, x : x +", "cv2.imshow(\"Video\", frame) key = cv2.waitKey(1) & 0xFF if key ==", "np def main(): faceCascade = cv2.CascadeClassifier(\"haarcascade_frontalface_alt.xml\") eyeCascade = cv2.CascadeClassifier(\"haarcascade_eye.xml\") #", "detect eyes using webcam # tutorial: https://www.roytuts.com/real-time-eye-detection-in-webcam-using-python-3/ import cv2 import", "= eyeCascade.detectMultiScale(roi_gray) for (ex, ey, ew, eh) in eyes: cv2.rectangle(roi_color,", "0, 255), 2) cv2.imshow(\"Video\", frame) key = cv2.waitKey(1) & 0xFF", "roi_color = frame[y : y + h, x : x", "h, x : x + w] eyes = eyeCascade.detectMultiScale(roi_gray) for", "= cv2.waitKey(1) & 0xFF if key == ord(\"q\") or key", "frame = vs.read() if frame is None: break gray =", "try: vs = cv2.VideoCapture(0) print(vs) while True: ret, frame =", "if key == ord(\"q\") or key == 27: break cv2.destroyAllWindows()", "cv2.COLOR_BGR2GRAY) gray = cv2.equalizeHist(gray) faces = faceCascade.detectMultiScale(frame) for (x, y,", "x + w] eyes = eyeCascade.detectMultiScale(roi_gray) for (ex, ey, ew,", "if frame is None: break gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) gray", "in eyes: cv2.rectangle(roi_color, (ex, ey), (ex + ew, ey +", "the webcam # try: vs = cv2.VideoCapture(0) print(vs) while True:", "ey, ew, eh) in eyes: cv2.rectangle(roi_color, (ex, ey), (ex +", "(0, 0, 255), 2) cv2.imshow(\"Video\", frame) key = cv2.waitKey(1) &", "x : x + w] eyes = eyeCascade.detectMultiScale(roi_gray) for (ex,", "print(vs) while True: ret, frame = vs.read() if frame is", "eye_detection.py - detect eyes using webcam # tutorial: https://www.roytuts.com/real-time-eye-detection-in-webcam-using-python-3/ import", "import numpy as np def main(): faceCascade = cv2.CascadeClassifier(\"haarcascade_frontalface_alt.xml\") eyeCascade", "tutorial: https://www.roytuts.com/real-time-eye-detection-in-webcam-using-python-3/ import cv2 import math import numpy as np", "eyes using webcam # tutorial: https://www.roytuts.com/real-time-eye-detection-in-webcam-using-python-3/ import cv2 import math", ": y + h, x : x + w] roi_color", "#!python3 # eye_detection.py - detect eyes using webcam # tutorial:", "(ex, ey), (ex + ew, ey + eh), (0, 0,", "+ h, x : x + w] roi_color = frame[y", "import cv2 import math import numpy as np def main():", "+ h, x : x + w] eyes = eyeCascade.detectMultiScale(roi_gray)", "https://www.roytuts.com/real-time-eye-detection-in-webcam-using-python-3/ import cv2 import math import numpy as np def", "== ord(\"q\") or key == 27: break cv2.destroyAllWindows() if __name__", "frame[y : y + h, x : x + w]", "main(): faceCascade = cv2.CascadeClassifier(\"haarcascade_frontalface_alt.xml\") eyeCascade = cv2.CascadeClassifier(\"haarcascade_eye.xml\") # grab the", "w, h) in faces: roi_gray = gray[y : y +", "= cv2.equalizeHist(gray) faces = faceCascade.detectMultiScale(frame) for (x, y, w, h)", "eh) in eyes: cv2.rectangle(roi_color, (ex, ey), (ex + ew, ey", "the reference to the webcam # try: vs = cv2.VideoCapture(0)", "& 0xFF if key == ord(\"q\") or key == 27:", "faceCascade = cv2.CascadeClassifier(\"haarcascade_frontalface_alt.xml\") eyeCascade = cv2.CascadeClassifier(\"haarcascade_eye.xml\") # grab the reference", "eyes = eyeCascade.detectMultiScale(roi_gray) for (ex, ey, ew, eh) in 
eyes:", "eyeCascade = cv2.CascadeClassifier(\"haarcascade_eye.xml\") # grab the reference to the webcam", "ey + eh), (0, 0, 255), 2) cv2.imshow(\"Video\", frame) key", "+ ew, ey + eh), (0, 0, 255), 2) cv2.imshow(\"Video\",", "break gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) gray = cv2.equalizeHist(gray) faces =", "faces: roi_gray = gray[y : y + h, x :", "faces = faceCascade.detectMultiScale(frame) for (x, y, w, h) in faces:", "(ex + ew, ey + eh), (0, 0, 255), 2)", "- detect eyes using webcam # tutorial: https://www.roytuts.com/real-time-eye-detection-in-webcam-using-python-3/ import cv2", "for (x, y, w, h) in faces: roi_gray = gray[y", "using webcam # tutorial: https://www.roytuts.com/real-time-eye-detection-in-webcam-using-python-3/ import cv2 import math import", "eyes: cv2.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh),", "# try: vs = cv2.VideoCapture(0) print(vs) while True: ret, frame", "eyeCascade.detectMultiScale(roi_gray) for (ex, ey, ew, eh) in eyes: cv2.rectangle(roi_color, (ex,", "= cv2.CascadeClassifier(\"haarcascade_frontalface_alt.xml\") eyeCascade = cv2.CascadeClassifier(\"haarcascade_eye.xml\") # grab the reference to", "math import numpy as np def main(): faceCascade = cv2.CascadeClassifier(\"haarcascade_frontalface_alt.xml\")", "ord(\"q\") or key == 27: break cv2.destroyAllWindows() if __name__ ==", "numpy as np def main(): faceCascade = cv2.CascadeClassifier(\"haarcascade_frontalface_alt.xml\") eyeCascade =", "frame) key = cv2.waitKey(1) & 0xFF if key == ord(\"q\")", "reference to the webcam # try: vs = cv2.VideoCapture(0) print(vs)", "= faceCascade.detectMultiScale(frame) for (x, y, w, h) in faces: roi_gray", ": y + h, x : x + w] eyes", "(ex, ey, ew, eh) in eyes: cv2.rectangle(roi_color, (ex, ey), (ex", "grab the reference to the webcam # try: vs =", "= cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) gray = cv2.equalizeHist(gray) faces = faceCascade.detectMultiScale(frame) for", "<reponame>ShivanS93/VAtest_withOKN #!python3 # eye_detection.py - detect eyes using webcam #", "ew, eh) in eyes: cv2.rectangle(roi_color, (ex, ey), (ex + ew,", "True: ret, frame = vs.read() if frame is None: break", "eh), (0, 0, 255), 2) cv2.imshow(\"Video\", frame) key = cv2.waitKey(1)", "or key == 27: break cv2.destroyAllWindows() if __name__ == \"__main__\":", "cv2 import math import numpy as np def main(): faceCascade", "x : x + w] roi_color = frame[y : y", "ey), (ex + ew, ey + eh), (0, 0, 255),", "gray[y : y + h, x : x + w]", ": x + w] eyes = eyeCascade.detectMultiScale(roi_gray) for (ex, ey,", "in faces: roi_gray = gray[y : y + h, x", "y, w, h) in faces: roi_gray = gray[y : y" ]
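# Editor's sketch (not part of the original file): the script above loads
# the Haar cascade XMLs from the working directory. With the pip package
# opencv-python, the bundled copies can be resolved via cv2.data instead:
import cv2

faceCascade = cv2.CascadeClassifier(
    cv2.data.haarcascades + "haarcascade_frontalface_alt.xml")
eyeCascade = cv2.CascadeClassifier(
    cv2.data.haarcascades + "haarcascade_eye.xml")
# empty() is True when a cascade file failed to load
assert not faceCascade.empty() and not eyeCascade.empty()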
[ "line in fh: cols = line.strip('\\n').split('\\t') if cols[1]: descripts[cols[0]] =", "= {} with open('macaca_genes.txt') as fh: fh.readline() for line in", "else: descripts[cols[0]] = cols[1] with open('gene_info.txt') as fh: for line", "cols = line.strip('\\n').split('\\t') if cols[1]: descripts[cols[0]] = cols[1].split('[')[0].strip() else: descripts[cols[0]]", "= cols[1] with open('gene_info.txt') as fh: for line in fh:", "fh: for line in fh: cols = line.strip().split('\\t') cols.append(descripts[cols[1]]) print", "descripts[cols[0]] = cols[1] with open('gene_info.txt') as fh: for line in", "open('macaca_genes.txt') as fh: fh.readline() for line in fh: cols =", "{} with open('macaca_genes.txt') as fh: fh.readline() for line in fh:", "for line in fh: cols = line.strip('\\n').split('\\t') if cols[1]: descripts[cols[0]]", "fh.readline() for line in fh: cols = line.strip('\\n').split('\\t') if cols[1]:", "as fh: fh.readline() for line in fh: cols = line.strip('\\n').split('\\t')", "with open('gene_info.txt') as fh: for line in fh: cols =", "= cols[1].split('[')[0].strip() else: descripts[cols[0]] = cols[1] with open('gene_info.txt') as fh:", "in fh: cols = line.strip('\\n').split('\\t') if cols[1]: descripts[cols[0]] = cols[1].split('[')[0].strip()", "cols[1]: descripts[cols[0]] = cols[1].split('[')[0].strip() else: descripts[cols[0]] = cols[1] with open('gene_info.txt')", "<gh_stars>0 #!/usr/bin/env python descripts = {} with open('macaca_genes.txt') as fh:", "descripts = {} with open('macaca_genes.txt') as fh: fh.readline() for line", "for line in fh: cols = line.strip().split('\\t') cols.append(descripts[cols[1]]) print \"\\t\".join(cols)", "with open('macaca_genes.txt') as fh: fh.readline() for line in fh: cols", "= line.strip('\\n').split('\\t') if cols[1]: descripts[cols[0]] = cols[1].split('[')[0].strip() else: descripts[cols[0]] =", "line.strip('\\n').split('\\t') if cols[1]: descripts[cols[0]] = cols[1].split('[')[0].strip() else: descripts[cols[0]] = cols[1]", "if cols[1]: descripts[cols[0]] = cols[1].split('[')[0].strip() else: descripts[cols[0]] = cols[1] with", "python descripts = {} with open('macaca_genes.txt') as fh: fh.readline() for", "#!/usr/bin/env python descripts = {} with open('macaca_genes.txt') as fh: fh.readline()", "cols[1] with open('gene_info.txt') as fh: for line in fh: cols", "fh: cols = line.strip('\\n').split('\\t') if cols[1]: descripts[cols[0]] = cols[1].split('[')[0].strip() else:", "cols[1].split('[')[0].strip() else: descripts[cols[0]] = cols[1] with open('gene_info.txt') as fh: for", "as fh: for line in fh: cols = line.strip().split('\\t') cols.append(descripts[cols[1]])", "open('gene_info.txt') as fh: for line in fh: cols = line.strip().split('\\t')", "descripts[cols[0]] = cols[1].split('[')[0].strip() else: descripts[cols[0]] = cols[1] with open('gene_info.txt') as", "fh: fh.readline() for line in fh: cols = line.strip('\\n').split('\\t') if" ]
[ "sc.add_event(**_event_parameters) event = sc._instantiate_event(sc.fg_spec[-1]) _reset_event_spec(sc) return sc, event def make_one_mixture_coherent(sc,", "False while not check: sc, event = instantiate_and_get_event_spec( sc, labels[0],", "label)), source_time=('const', event.source_time), event_time=('const', 0), event_duration=('const', sc.duration), snr=event_parameters['snr'], pitch_shift=('const', event.pitch_shift),", "sc.duration), snr=event_parameters['snr'], pitch_shift=('const', event.pitch_shift), time_stretch=('const', event.time_stretch) ) except: logging.exception( f\"Got", "for each source. \"\"\" check = False while not check:", "error for {label} @ {_source_file}. Moving on...\") sc.generate( path_to_file, path_to_file.replace('.wav',", "None: raise ValueError(\"Coherent mixing requires explicit labels!\") generators = []", "sc.add_event(**event_parameters) sc.generate( path_to_file, path_to_file.replace('.wav', '.jams'), no_audio=False, allow_repeated_label=allow_repeated_label, save_isolated_events=True, ) _reset_event_spec(sc)", "must be set!\") if coherent and labels is None: raise", "1 mix_func = make_one_mixture_coherent if coherent else make_one_mixture def arg_tuple(i):", "return _args args = [arg_tuple(i) for i in range(num_mixtures)] #", "check_mixture(path_to_file) def instantiate_and_get_event_spec(sc, master_label, event_parameters): _reset_event_spec(sc) _event_parameters = copy.deepcopy(event_parameters) _event_parameters['label']", "time_stretch=('const', event.time_stretch) ) except: logging.exception( f\"Got an error for {label}", "generators = [] if background_path is None: background_path = foreground_path", "path_to_file, num_sources, event_parameters, allow_repeated_label): \"\"\" Creates a single mixture, incoherent.", "( generators[i], os.path.join(target_folder, f'{i:08d}.wav'), labels if coherent else num_sources, event_parameters,", "requires explicit labels!\") generators = [] if background_path is None:", "else num_sources, event_parameters, allow_repeated_label ) return _args args = [arg_tuple(i)", "range(num_mixtures)] # do one by itself for testing mix_func(*args[0]) args", "for i in range(num_mixtures): sc = Scaper( scene_duration, fg_path=foreground_path, bg_path=background_path,", "= check_mixture(path_to_file) @gin.configurable def make_scaper_datasets(scopes=['train', 'val']): for scope in scopes:", "_reset_event_spec(sc) _event_parameters = copy.deepcopy(event_parameters) _event_parameters['label'] = ('const', master_label) sc.add_event(**_event_parameters) event", "check_mixture(path_to_mix): mix_signal = nussl.AudioSignal(path_to_mix) if mix_signal.rms() < .01: return False", "labels or num_sources must be set!\") if coherent and labels", "by itself for testing mix_func(*args[0]) args = list(zip(*args[1:])) args =", "event.source_file.replace(labels[0], label)), source_time=('const', event.source_time), event_time=('const', 0), event_duration=('const', sc.duration), snr=event_parameters['snr'], pitch_shift=('const',", "if coherent else make_one_mixture def arg_tuple(i): _args = ( generators[i],", "raise ValueError(\"Coherent mixing requires explicit labels!\") generators = [] if", "mix_with_scaper(num_mixtures, foreground_path, background_path, scene_duration, sample_rate, target_folder, event_parameters, num_sources=None, labels=None, coherent=False,", "import copy import logging import p_tqdm import nussl import os", "= instantiate_and_get_event_spec( sc, labels[0], event_parameters) for label in labels: try:", "master_label, 
event_parameters): _reset_event_spec(sc) _event_parameters = copy.deepcopy(event_parameters) _event_parameters['label'] = ('const', master_label)", "check: sc, event = instantiate_and_get_event_spec( sc, labels[0], event_parameters) for label", "bg_path=background_path, random_state=scaper_seed, ) sc.ref_db = ref_db sc.sr = sample_rate sc.bitdepth", "i in range(num_mixtures)] # do one by itself for testing", "allow_repeated_label): \"\"\" Creates a single mixture, incoherent. Instantiates according to", "coherent else num_sources, event_parameters, allow_repeated_label ) return _args args =", "import gin from scaper import Scaper, generate_from_jams import copy import", "labels!\") generators = [] if background_path is None: background_path =", "= [arg_tuple(i) for i in range(num_mixtures)] # do one by", "coherent=False, allow_repeated_label=False, ref_db=-40, bitdepth=16, seed=0, num_workers=1): nussl.utils.seed(seed) os.makedirs(target_folder, exist_ok=True) scaper_seed", "is None and labels is None: raise ValueError(\"One of labels", "scene_duration, fg_path=foreground_path, bg_path=background_path, random_state=scaper_seed, ) sc.ref_db = ref_db sc.sr =", "{label} @ {_source_file}. Moving on...\") sc.generate( path_to_file, path_to_file.replace('.wav', '.jams'), no_audio=False,", "generate_from_jams import copy import logging import p_tqdm import nussl import", "logging import p_tqdm import nussl import os import numpy as", "num_sources=None, labels=None, coherent=False, allow_repeated_label=False, ref_db=-40, bitdepth=16, seed=0, num_workers=1): nussl.utils.seed(seed) os.makedirs(target_folder,", "args = [list(a) for a in args] # now do", "\"\"\" Creates a single mixture, incoherent. Instantiates according to the", "mix_with_scaper() @gin.configurable def mix_with_scaper(num_mixtures, foreground_path, background_path, scene_duration, sample_rate, target_folder, event_parameters,", "= [list(a) for a in args] # now do the", "import Scaper, generate_from_jams import copy import logging import p_tqdm import", "label), source_file=('const', event.source_file.replace(labels[0], label)), source_time=('const', event.source_time), event_time=('const', 0), event_duration=('const', sc.duration),", "Instantiates according to the event parameters for each source. 
\"\"\"", "copy import logging import p_tqdm import nussl import os import", ") sc.fg_spec = [] check = check_mixture(path_to_file) @gin.configurable def make_scaper_datasets(scopes=['train',", "labels is None: raise ValueError(\"Coherent mixing requires explicit labels!\") generators", "label in labels: try: sc.add_event( label=('const', label), source_file=('const', event.source_file.replace(labels[0], label)),", ") _reset_event_spec(sc) check = check_mixture(path_to_file) def instantiate_and_get_event_spec(sc, master_label, event_parameters): _reset_event_spec(sc)", "import p_tqdm import nussl import os import numpy as np", "on...\") sc.generate( path_to_file, path_to_file.replace('.wav', '.jams'), no_audio=False, allow_repeated_label=allow_repeated_label, save_isolated_events=True, ) sc.fg_spec", "background_path, scene_duration, sample_rate, target_folder, event_parameters, num_sources=None, labels=None, coherent=False, allow_repeated_label=False, ref_db=-40,", "generators[i], os.path.join(target_folder, f'{i:08d}.wav'), labels if coherent else num_sources, event_parameters, allow_repeated_label", "0), event_duration=('const', sc.duration), snr=event_parameters['snr'], pitch_shift=('const', event.pitch_shift), time_stretch=('const', event.time_stretch) ) except:", "event.source_time), event_time=('const', 0), event_duration=('const', sc.duration), snr=event_parameters['snr'], pitch_shift=('const', event.pitch_shift), time_stretch=('const', event.time_stretch)", "event.time_stretch) ) except: logging.exception( f\"Got an error for {label} @", "allow_repeated_label=allow_repeated_label, save_isolated_events=True, ) _reset_event_spec(sc) check = check_mixture(path_to_file) def instantiate_and_get_event_spec(sc, master_label,", "event = sc._instantiate_event(sc.fg_spec[-1]) _reset_event_spec(sc) return sc, event def make_one_mixture_coherent(sc, path_to_file,", "check: for j in range(num_sources): sc.add_event(**event_parameters) sc.generate( path_to_file, path_to_file.replace('.wav', '.jams'),", "+= 1 mix_func = make_one_mixture_coherent if coherent else make_one_mixture def", "i in range(num_mixtures): sc = Scaper( scene_duration, fg_path=foreground_path, bg_path=background_path, random_state=scaper_seed,", "gin.config_scope(scope): mix_with_scaper() @gin.configurable def mix_with_scaper(num_mixtures, foreground_path, background_path, scene_duration, sample_rate, target_folder,", "None and labels is None: raise ValueError(\"One of labels or", "scaper_seed += 1 mix_func = make_one_mixture_coherent if coherent else make_one_mixture", "return True def make_one_mixture(sc, path_to_file, num_sources, event_parameters, allow_repeated_label): \"\"\" Creates", "an error for {label} @ {_source_file}. 
Moving on...\") sc.generate( path_to_file,", "background_path is None: background_path = foreground_path for i in range(num_mixtures):", "do one by itself for testing mix_func(*args[0]) args = list(zip(*args[1:]))", "= [] if background_path is None: background_path = foreground_path for", "Scaper( scene_duration, fg_path=foreground_path, bg_path=background_path, random_state=scaper_seed, ) sc.ref_db = ref_db sc.sr", "_reset_event_spec(sc): sc.reset_fg_event_spec() sc.reset_bg_event_spec() def check_mixture(path_to_mix): mix_signal = nussl.AudioSignal(path_to_mix) if mix_signal.rms()", "'.jams'), no_audio=False, allow_repeated_label=allow_repeated_label, save_isolated_events=True, ) sc.fg_spec = [] check =", "in scopes: with gin.config_scope(scope): mix_with_scaper() @gin.configurable def mix_with_scaper(num_mixtures, foreground_path, background_path,", "according to the event parameters for each source. \"\"\" check", "[] check = check_mixture(path_to_file) @gin.configurable def make_scaper_datasets(scopes=['train', 'val']): for scope", "exist_ok=True) scaper_seed = np.random.randint(100) logging.info('Starting mixing.') if num_sources is None", "if coherent else num_sources, event_parameters, allow_repeated_label ) return _args args", "event def make_one_mixture_coherent(sc, path_to_file, labels, event_parameters, allow_repeated_label): check = False", "scene_duration, sample_rate, target_folder, event_parameters, num_sources=None, labels=None, coherent=False, allow_repeated_label=False, ref_db=-40, bitdepth=16,", "no_audio=False, allow_repeated_label=allow_repeated_label, save_isolated_events=True, ) sc.fg_spec = [] check = check_mixture(path_to_file)", "coherent and labels is None: raise ValueError(\"Coherent mixing requires explicit", "= Scaper( scene_duration, fg_path=foreground_path, bg_path=background_path, random_state=scaper_seed, ) sc.ref_db = ref_db", "'.jams'), no_audio=False, allow_repeated_label=allow_repeated_label, save_isolated_events=True, ) _reset_event_spec(sc) check = check_mixture(path_to_file) def", "if background_path is None: background_path = foreground_path for i in", "arg_tuple(i): _args = ( generators[i], os.path.join(target_folder, f'{i:08d}.wav'), labels if coherent", "_reset_event_spec(sc) check = check_mixture(path_to_file) def instantiate_and_get_event_spec(sc, master_label, event_parameters): _reset_event_spec(sc) _event_parameters", "check = False while not check: sc, event = instantiate_and_get_event_spec(", "path_to_file.replace('.wav', '.jams'), no_audio=False, allow_repeated_label=allow_repeated_label, save_isolated_events=True, ) sc.fg_spec = [] check", "event_parameters, allow_repeated_label): check = False while not check: sc, event", "label=('const', label), source_file=('const', event.source_file.replace(labels[0], label)), source_time=('const', event.source_time), event_time=('const', 0), event_duration=('const',", "be set!\") if coherent and labels is None: raise ValueError(\"Coherent", "gin from scaper import Scaper, generate_from_jams import copy import logging", "def make_one_mixture(sc, path_to_file, num_sources, event_parameters, allow_repeated_label): \"\"\" Creates a single", "while not check: sc, event = instantiate_and_get_event_spec( sc, labels[0], event_parameters)", "sc.ref_db = ref_db sc.sr = sample_rate sc.bitdepth = bitdepth generators.append(sc)", "for i in range(num_mixtures)] # do one by itself for", "import logging import p_tqdm import nussl import os import numpy", "instantiate_and_get_event_spec(sc, master_label, 
event_parameters): _reset_event_spec(sc) _event_parameters = copy.deepcopy(event_parameters) _event_parameters['label'] = ('const',", "check = check_mixture(path_to_file) @gin.configurable def make_scaper_datasets(scopes=['train', 'val']): for scope in", "None: raise ValueError(\"One of labels or num_sources must be set!\")", "make_one_mixture(sc, path_to_file, num_sources, event_parameters, allow_repeated_label): \"\"\" Creates a single mixture,", "@ {_source_file}. Moving on...\") sc.generate( path_to_file, path_to_file.replace('.wav', '.jams'), no_audio=False, allow_repeated_label=allow_repeated_label,", "a in args] # now do the rest in parallel", "[arg_tuple(i) for i in range(num_mixtures)] # do one by itself", "mix_signal = nussl.AudioSignal(path_to_mix) if mix_signal.rms() < .01: return False return", "def _reset_event_spec(sc): sc.reset_fg_event_spec() sc.reset_bg_event_spec() def check_mixture(path_to_mix): mix_signal = nussl.AudioSignal(path_to_mix) if", "sample_rate sc.bitdepth = bitdepth generators.append(sc) scaper_seed += 1 mix_func =", "make_scaper_datasets(scopes=['train', 'val']): for scope in scopes: with gin.config_scope(scope): mix_with_scaper() @gin.configurable", "def make_one_mixture_coherent(sc, path_to_file, labels, event_parameters, allow_repeated_label): check = False while", "_args args = [arg_tuple(i) for i in range(num_mixtures)] # do", "explicit labels!\") generators = [] if background_path is None: background_path", "set!\") if coherent and labels is None: raise ValueError(\"Coherent mixing", ") except: logging.exception( f\"Got an error for {label} @ {_source_file}.", "testing mix_func(*args[0]) args = list(zip(*args[1:])) args = [list(a) for a", "check_mixture(path_to_file) @gin.configurable def make_scaper_datasets(scopes=['train', 'val']): for scope in scopes: with", "num_workers=1): nussl.utils.seed(seed) os.makedirs(target_folder, exist_ok=True) scaper_seed = np.random.randint(100) logging.info('Starting mixing.') if", ".01: return False return True def make_one_mixture(sc, path_to_file, num_sources, event_parameters,", "= [] check = check_mixture(path_to_file) @gin.configurable def make_scaper_datasets(scopes=['train', 'val']): for", "for a in args] # now do the rest in", "num_sources, event_parameters, allow_repeated_label): \"\"\" Creates a single mixture, incoherent. 
Instantiates", "_event_parameters['label'] = ('const', master_label) sc.add_event(**_event_parameters) event = sc._instantiate_event(sc.fg_spec[-1]) _reset_event_spec(sc) return", "sc, event def make_one_mixture_coherent(sc, path_to_file, labels, event_parameters, allow_repeated_label): check =", "labels[0], event_parameters) for label in labels: try: sc.add_event( label=('const', label),", "sc.reset_fg_event_spec() sc.reset_bg_event_spec() def check_mixture(path_to_mix): mix_signal = nussl.AudioSignal(path_to_mix) if mix_signal.rms() <", "in range(num_mixtures)] # do one by itself for testing mix_func(*args[0])", "np def _reset_event_spec(sc): sc.reset_fg_event_spec() sc.reset_bg_event_spec() def check_mixture(path_to_mix): mix_signal = nussl.AudioSignal(path_to_mix)", "foreground_path for i in range(num_mixtures): sc = Scaper( scene_duration, fg_path=foreground_path,", "args] # now do the rest in parallel p_tqdm.p_map(mix_func, *args,", "ValueError(\"One of labels or num_sources must be set!\") if coherent", "for testing mix_func(*args[0]) args = list(zip(*args[1:])) args = [list(a) for", "= np.random.randint(100) logging.info('Starting mixing.') if num_sources is None and labels", "make_one_mixture_coherent(sc, path_to_file, labels, event_parameters, allow_repeated_label): check = False while not", "from scaper import Scaper, generate_from_jams import copy import logging import", "@gin.configurable def mix_with_scaper(num_mixtures, foreground_path, background_path, scene_duration, sample_rate, target_folder, event_parameters, num_sources=None,", "None: background_path = foreground_path for i in range(num_mixtures): sc =", "= False while not check: for j in range(num_sources): sc.add_event(**event_parameters)", "fg_path=foreground_path, bg_path=background_path, random_state=scaper_seed, ) sc.ref_db = ref_db sc.sr = sample_rate", "itself for testing mix_func(*args[0]) args = list(zip(*args[1:])) args = [list(a)", "labels is None: raise ValueError(\"One of labels or num_sources must", "= list(zip(*args[1:])) args = [list(a) for a in args] #", "return False return True def make_one_mixture(sc, path_to_file, num_sources, event_parameters, allow_repeated_label):", "f\"Got an error for {label} @ {_source_file}. Moving on...\") sc.generate(", "def arg_tuple(i): _args = ( generators[i], os.path.join(target_folder, f'{i:08d}.wav'), labels if", "bitdepth generators.append(sc) scaper_seed += 1 mix_func = make_one_mixture_coherent if coherent", "path_to_file, path_to_file.replace('.wav', '.jams'), no_audio=False, allow_repeated_label=allow_repeated_label, save_isolated_events=True, ) sc.fg_spec = []", "for scope in scopes: with gin.config_scope(scope): mix_with_scaper() @gin.configurable def mix_with_scaper(num_mixtures,", "= ( generators[i], os.path.join(target_folder, f'{i:08d}.wav'), labels if coherent else num_sources,", "\"\"\" check = False while not check: for j in", "copy.deepcopy(event_parameters) _event_parameters['label'] = ('const', master_label) sc.add_event(**_event_parameters) event = sc._instantiate_event(sc.fg_spec[-1]) _reset_event_spec(sc)", "sc._instantiate_event(sc.fg_spec[-1]) _reset_event_spec(sc) return sc, event def make_one_mixture_coherent(sc, path_to_file, labels, event_parameters,", "<filename>{{cookiecutter.repo_name}}/src/mix_with_scaper.py import gin from scaper import Scaper, generate_from_jams import copy", "{_source_file}. 
Moving on...\") sc.generate( path_to_file, path_to_file.replace('.wav', '.jams'), no_audio=False, allow_repeated_label=allow_repeated_label, save_isolated_events=True,", "allow_repeated_label ) return _args args = [arg_tuple(i) for i in", "mixing requires explicit labels!\") generators = [] if background_path is", "target_folder, event_parameters, num_sources=None, labels=None, coherent=False, allow_repeated_label=False, ref_db=-40, bitdepth=16, seed=0, num_workers=1):", "allow_repeated_label=allow_repeated_label, save_isolated_events=True, ) sc.fg_spec = [] check = check_mixture(path_to_file) @gin.configurable", "= check_mixture(path_to_file) def instantiate_and_get_event_spec(sc, master_label, event_parameters): _reset_event_spec(sc) _event_parameters = copy.deepcopy(event_parameters)", "event parameters for each source. \"\"\" check = False while", "_event_parameters = copy.deepcopy(event_parameters) _event_parameters['label'] = ('const', master_label) sc.add_event(**_event_parameters) event =", "if mix_signal.rms() < .01: return False return True def make_one_mixture(sc,", "make_one_mixture_coherent if coherent else make_one_mixture def arg_tuple(i): _args = (", "while not check: for j in range(num_sources): sc.add_event(**event_parameters) sc.generate( path_to_file,", "path_to_file, path_to_file.replace('.wav', '.jams'), no_audio=False, allow_repeated_label=allow_repeated_label, save_isolated_events=True, ) _reset_event_spec(sc) check =", "sc, labels[0], event_parameters) for label in labels: try: sc.add_event( label=('const',", "is None: background_path = foreground_path for i in range(num_mixtures): sc", "scaper_seed = np.random.randint(100) logging.info('Starting mixing.') if num_sources is None and", "logging.info('Starting mixing.') if num_sources is None and labels is None:", "path_to_file, labels, event_parameters, allow_repeated_label): check = False while not check:", "in range(num_mixtures): sc = Scaper( scene_duration, fg_path=foreground_path, bg_path=background_path, random_state=scaper_seed, )", "_args = ( generators[i], os.path.join(target_folder, f'{i:08d}.wav'), labels if coherent else", "labels=None, coherent=False, allow_repeated_label=False, ref_db=-40, bitdepth=16, seed=0, num_workers=1): nussl.utils.seed(seed) os.makedirs(target_folder, exist_ok=True)", "else make_one_mixture def arg_tuple(i): _args = ( generators[i], os.path.join(target_folder, f'{i:08d}.wav'),", "= foreground_path for i in range(num_mixtures): sc = Scaper( scene_duration,", "is None: raise ValueError(\"Coherent mixing requires explicit labels!\") generators =", "event = instantiate_and_get_event_spec( sc, labels[0], event_parameters) for label in labels:", "and labels is None: raise ValueError(\"One of labels or num_sources", "event_parameters, allow_repeated_label ) return _args args = [arg_tuple(i) for i", "num_sources, event_parameters, allow_repeated_label ) return _args args = [arg_tuple(i) for", "scopes: with gin.config_scope(scope): mix_with_scaper() @gin.configurable def mix_with_scaper(num_mixtures, foreground_path, background_path, scene_duration,", "_reset_event_spec(sc) return sc, event def make_one_mixture_coherent(sc, path_to_file, labels, event_parameters, allow_repeated_label):", "os.path.join(target_folder, f'{i:08d}.wav'), labels if coherent else num_sources, event_parameters, allow_repeated_label )", "os import numpy as np def _reset_event_spec(sc): sc.reset_fg_event_spec() sc.reset_bg_event_spec() def", "sc.reset_bg_event_spec() def check_mixture(path_to_mix): mix_signal = 
nussl.AudioSignal(path_to_mix) if mix_signal.rms() < .01:", "labels, event_parameters, allow_repeated_label): check = False while not check: sc,", "scaper import Scaper, generate_from_jams import copy import logging import p_tqdm", "sc, event = instantiate_and_get_event_spec( sc, labels[0], event_parameters) for label in", "= False while not check: sc, event = instantiate_and_get_event_spec( sc,", "the event parameters for each source. \"\"\" check = False", "nussl.AudioSignal(path_to_mix) if mix_signal.rms() < .01: return False return True def", "if coherent and labels is None: raise ValueError(\"Coherent mixing requires", "check = False while not check: for j in range(num_sources):", "source. \"\"\" check = False while not check: for j", "check = check_mixture(path_to_file) def instantiate_and_get_event_spec(sc, master_label, event_parameters): _reset_event_spec(sc) _event_parameters =", "parameters for each source. \"\"\" check = False while not", "# now do the rest in parallel p_tqdm.p_map(mix_func, *args, num_cpus=num_workers)", "'val']): for scope in scopes: with gin.config_scope(scope): mix_with_scaper() @gin.configurable def", "and labels is None: raise ValueError(\"Coherent mixing requires explicit labels!\")", "seed=0, num_workers=1): nussl.utils.seed(seed) os.makedirs(target_folder, exist_ok=True) scaper_seed = np.random.randint(100) logging.info('Starting mixing.')", "range(num_sources): sc.add_event(**event_parameters) sc.generate( path_to_file, path_to_file.replace('.wav', '.jams'), no_audio=False, allow_repeated_label=allow_repeated_label, save_isolated_events=True, )", "= ('const', master_label) sc.add_event(**_event_parameters) event = sc._instantiate_event(sc.fg_spec[-1]) _reset_event_spec(sc) return sc,", "= nussl.AudioSignal(path_to_mix) if mix_signal.rms() < .01: return False return True", "nussl import os import numpy as np def _reset_event_spec(sc): sc.reset_fg_event_spec()", "mix_signal.rms() < .01: return False return True def make_one_mixture(sc, path_to_file,", "[] if background_path is None: background_path = foreground_path for i", "ref_db sc.sr = sample_rate sc.bitdepth = bitdepth generators.append(sc) scaper_seed +=", "def mix_with_scaper(num_mixtures, foreground_path, background_path, scene_duration, sample_rate, target_folder, event_parameters, num_sources=None, labels=None,", "try: sc.add_event( label=('const', label), source_file=('const', event.source_file.replace(labels[0], label)), source_time=('const', event.source_time), event_time=('const',", "for label in labels: try: sc.add_event( label=('const', label), source_file=('const', event.source_file.replace(labels[0],", "source_time=('const', event.source_time), event_time=('const', 0), event_duration=('const', sc.duration), snr=event_parameters['snr'], pitch_shift=('const', event.pitch_shift), time_stretch=('const',", ") sc.ref_db = ref_db sc.sr = sample_rate sc.bitdepth = bitdepth", "def instantiate_and_get_event_spec(sc, master_label, event_parameters): _reset_event_spec(sc) _event_parameters = copy.deepcopy(event_parameters) _event_parameters['label'] =", "in range(num_sources): sc.add_event(**event_parameters) sc.generate( path_to_file, path_to_file.replace('.wav', '.jams'), no_audio=False, allow_repeated_label=allow_repeated_label, save_isolated_events=True,", "num_sources is None and labels is None: raise ValueError(\"One of", "in args] # now do the rest in parallel p_tqdm.p_map(mix_func,", "coherent else make_one_mixture def arg_tuple(i): _args = ( generators[i], os.path.join(target_folder,", 
"mixing.') if num_sources is None and labels is None: raise", "path_to_file.replace('.wav', '.jams'), no_audio=False, allow_repeated_label=allow_repeated_label, save_isolated_events=True, ) _reset_event_spec(sc) check = check_mixture(path_to_file)", "not check: for j in range(num_sources): sc.add_event(**event_parameters) sc.generate( path_to_file, path_to_file.replace('.wav',", "import os import numpy as np def _reset_event_spec(sc): sc.reset_fg_event_spec() sc.reset_bg_event_spec()", "sc.sr = sample_rate sc.bitdepth = bitdepth generators.append(sc) scaper_seed += 1", "@gin.configurable def make_scaper_datasets(scopes=['train', 'val']): for scope in scopes: with gin.config_scope(scope):", "range(num_mixtures): sc = Scaper( scene_duration, fg_path=foreground_path, bg_path=background_path, random_state=scaper_seed, ) sc.ref_db", "for j in range(num_sources): sc.add_event(**event_parameters) sc.generate( path_to_file, path_to_file.replace('.wav', '.jams'), no_audio=False,", "= bitdepth generators.append(sc) scaper_seed += 1 mix_func = make_one_mixture_coherent if", "make_one_mixture def arg_tuple(i): _args = ( generators[i], os.path.join(target_folder, f'{i:08d}.wav'), labels", "j in range(num_sources): sc.add_event(**event_parameters) sc.generate( path_to_file, path_to_file.replace('.wav', '.jams'), no_audio=False, allow_repeated_label=allow_repeated_label,", "os.makedirs(target_folder, exist_ok=True) scaper_seed = np.random.randint(100) logging.info('Starting mixing.') if num_sources is", "f'{i:08d}.wav'), labels if coherent else num_sources, event_parameters, allow_repeated_label ) return", "return sc, event def make_one_mixture_coherent(sc, path_to_file, labels, event_parameters, allow_repeated_label): check", "snr=event_parameters['snr'], pitch_shift=('const', event.pitch_shift), time_stretch=('const', event.time_stretch) ) except: logging.exception( f\"Got an", "Scaper, generate_from_jams import copy import logging import p_tqdm import nussl", "ValueError(\"Coherent mixing requires explicit labels!\") generators = [] if background_path", "scope in scopes: with gin.config_scope(scope): mix_with_scaper() @gin.configurable def mix_with_scaper(num_mixtures, foreground_path,", "False while not check: for j in range(num_sources): sc.add_event(**event_parameters) sc.generate(", "event_duration=('const', sc.duration), snr=event_parameters['snr'], pitch_shift=('const', event.pitch_shift), time_stretch=('const', event.time_stretch) ) except: logging.exception(", "incoherent. 
Instantiates according to the event parameters for each source.", "sc.generate( path_to_file, path_to_file.replace('.wav', '.jams'), no_audio=False, allow_repeated_label=allow_repeated_label, save_isolated_events=True, ) _reset_event_spec(sc) check", "sc.add_event( label=('const', label), source_file=('const', event.source_file.replace(labels[0], label)), source_time=('const', event.source_time), event_time=('const', 0),", "with gin.config_scope(scope): mix_with_scaper() @gin.configurable def mix_with_scaper(num_mixtures, foreground_path, background_path, scene_duration, sample_rate,", "np.random.randint(100) logging.info('Starting mixing.') if num_sources is None and labels is", "def make_scaper_datasets(scopes=['train', 'val']): for scope in scopes: with gin.config_scope(scope): mix_with_scaper()", "= sample_rate sc.bitdepth = bitdepth generators.append(sc) scaper_seed += 1 mix_func", "of labels or num_sources must be set!\") if coherent and", "event_time=('const', 0), event_duration=('const', sc.duration), snr=event_parameters['snr'], pitch_shift=('const', event.pitch_shift), time_stretch=('const', event.time_stretch) )", "as np def _reset_event_spec(sc): sc.reset_fg_event_spec() sc.reset_bg_event_spec() def check_mixture(path_to_mix): mix_signal =", "p_tqdm import nussl import os import numpy as np def", "import nussl import os import numpy as np def _reset_event_spec(sc):", "bitdepth=16, seed=0, num_workers=1): nussl.utils.seed(seed) os.makedirs(target_folder, exist_ok=True) scaper_seed = np.random.randint(100) logging.info('Starting", "[list(a) for a in args] # now do the rest", "a single mixture, incoherent. Instantiates according to the event parameters", "sc.bitdepth = bitdepth generators.append(sc) scaper_seed += 1 mix_func = make_one_mixture_coherent", "is None: raise ValueError(\"One of labels or num_sources must be", "nussl.utils.seed(seed) os.makedirs(target_folder, exist_ok=True) scaper_seed = np.random.randint(100) logging.info('Starting mixing.') if num_sources", "logging.exception( f\"Got an error for {label} @ {_source_file}. Moving on...\")", "except: logging.exception( f\"Got an error for {label} @ {_source_file}. Moving", "list(zip(*args[1:])) args = [list(a) for a in args] # now", ") return _args args = [arg_tuple(i) for i in range(num_mixtures)]", "no_audio=False, allow_repeated_label=allow_repeated_label, save_isolated_events=True, ) _reset_event_spec(sc) check = check_mixture(path_to_file) def instantiate_and_get_event_spec(sc,", "event_parameters) for label in labels: try: sc.add_event( label=('const', label), source_file=('const',", "sc.generate( path_to_file, path_to_file.replace('.wav', '.jams'), no_audio=False, allow_repeated_label=allow_repeated_label, save_isolated_events=True, ) sc.fg_spec =", "for {label} @ {_source_file}. Moving on...\") sc.generate( path_to_file, path_to_file.replace('.wav', '.jams'),", "= sc._instantiate_event(sc.fg_spec[-1]) _reset_event_spec(sc) return sc, event def make_one_mixture_coherent(sc, path_to_file, labels,", "= make_one_mixture_coherent if coherent else make_one_mixture def arg_tuple(i): _args =", "event_parameters, num_sources=None, labels=None, coherent=False, allow_repeated_label=False, ref_db=-40, bitdepth=16, seed=0, num_workers=1): nussl.utils.seed(seed)", "pitch_shift=('const', event.pitch_shift), time_stretch=('const', event.time_stretch) ) except: logging.exception( f\"Got an error", "to the event parameters for each source. 
\"\"\" check =", "= ref_db sc.sr = sample_rate sc.bitdepth = bitdepth generators.append(sc) scaper_seed", "True def make_one_mixture(sc, path_to_file, num_sources, event_parameters, allow_repeated_label): \"\"\" Creates a", "mix_func(*args[0]) args = list(zip(*args[1:])) args = [list(a) for a in", "background_path = foreground_path for i in range(num_mixtures): sc = Scaper(", "in labels: try: sc.add_event( label=('const', label), source_file=('const', event.source_file.replace(labels[0], label)), source_time=('const',", "event_parameters): _reset_event_spec(sc) _event_parameters = copy.deepcopy(event_parameters) _event_parameters['label'] = ('const', master_label) sc.add_event(**_event_parameters)", "not check: sc, event = instantiate_and_get_event_spec( sc, labels[0], event_parameters) for", "master_label) sc.add_event(**_event_parameters) event = sc._instantiate_event(sc.fg_spec[-1]) _reset_event_spec(sc) return sc, event def", "sc = Scaper( scene_duration, fg_path=foreground_path, bg_path=background_path, random_state=scaper_seed, ) sc.ref_db =", "Creates a single mixture, incoherent. Instantiates according to the event", "single mixture, incoherent. Instantiates according to the event parameters for", "False return True def make_one_mixture(sc, path_to_file, num_sources, event_parameters, allow_repeated_label): \"\"\"", "def check_mixture(path_to_mix): mix_signal = nussl.AudioSignal(path_to_mix) if mix_signal.rms() < .01: return", "event_parameters, allow_repeated_label): \"\"\" Creates a single mixture, incoherent. Instantiates according", "labels if coherent else num_sources, event_parameters, allow_repeated_label ) return _args", "one by itself for testing mix_func(*args[0]) args = list(zip(*args[1:])) args", "mixture, incoherent. Instantiates according to the event parameters for each", "= copy.deepcopy(event_parameters) _event_parameters['label'] = ('const', master_label) sc.add_event(**_event_parameters) event = sc._instantiate_event(sc.fg_spec[-1])", "args = [arg_tuple(i) for i in range(num_mixtures)] # do one", "labels: try: sc.add_event( label=('const', label), source_file=('const', event.source_file.replace(labels[0], label)), source_time=('const', event.source_time),", "sc.fg_spec = [] check = check_mixture(path_to_file) @gin.configurable def make_scaper_datasets(scopes=['train', 'val']):", "raise ValueError(\"One of labels or num_sources must be set!\") if", "# do one by itself for testing mix_func(*args[0]) args =", "Moving on...\") sc.generate( path_to_file, path_to_file.replace('.wav', '.jams'), no_audio=False, allow_repeated_label=allow_repeated_label, save_isolated_events=True, )", "allow_repeated_label): check = False while not check: sc, event =", "allow_repeated_label=False, ref_db=-40, bitdepth=16, seed=0, num_workers=1): nussl.utils.seed(seed) os.makedirs(target_folder, exist_ok=True) scaper_seed =", "ref_db=-40, bitdepth=16, seed=0, num_workers=1): nussl.utils.seed(seed) os.makedirs(target_folder, exist_ok=True) scaper_seed = np.random.randint(100)", "foreground_path, background_path, scene_duration, sample_rate, target_folder, event_parameters, num_sources=None, labels=None, coherent=False, allow_repeated_label=False,", "numpy as np def _reset_event_spec(sc): sc.reset_fg_event_spec() sc.reset_bg_event_spec() def check_mixture(path_to_mix): mix_signal", "mix_func = make_one_mixture_coherent if coherent else make_one_mixture def arg_tuple(i): _args", "args = list(zip(*args[1:])) args = [list(a) for a in args]", "< .01: return False return True def 
make_one_mixture(sc, path_to_file, num_sources,", "sample_rate, target_folder, event_parameters, num_sources=None, labels=None, coherent=False, allow_repeated_label=False, ref_db=-40, bitdepth=16, seed=0,", "or num_sources must be set!\") if coherent and labels is", "source_file=('const', event.source_file.replace(labels[0], label)), source_time=('const', event.source_time), event_time=('const', 0), event_duration=('const', sc.duration), snr=event_parameters['snr'],", "random_state=scaper_seed, ) sc.ref_db = ref_db sc.sr = sample_rate sc.bitdepth =", "each source. \"\"\" check = False while not check: for", "import numpy as np def _reset_event_spec(sc): sc.reset_fg_event_spec() sc.reset_bg_event_spec() def check_mixture(path_to_mix):", "('const', master_label) sc.add_event(**_event_parameters) event = sc._instantiate_event(sc.fg_spec[-1]) _reset_event_spec(sc) return sc, event", "save_isolated_events=True, ) _reset_event_spec(sc) check = check_mixture(path_to_file) def instantiate_and_get_event_spec(sc, master_label, event_parameters):", "generators.append(sc) scaper_seed += 1 mix_func = make_one_mixture_coherent if coherent else", "event.pitch_shift), time_stretch=('const', event.time_stretch) ) except: logging.exception( f\"Got an error for", "if num_sources is None and labels is None: raise ValueError(\"One", "save_isolated_events=True, ) sc.fg_spec = [] check = check_mixture(path_to_file) @gin.configurable def", "num_sources must be set!\") if coherent and labels is None:", "instantiate_and_get_event_spec( sc, labels[0], event_parameters) for label in labels: try: sc.add_event(" ]
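# A hedged sketch, not from the original file: both functions above are
# gin-configurable, so their arguments are expected to arrive via a gin
# config. Only the parameter names below come from the signatures above;
# every path, label, and value is an assumption for illustration.
#
#   mix_with_scaper.foreground_path = '/data/fg'
#   mix_with_scaper.background_path = None
#   mix_with_scaper.scene_duration = 5.0
#   mix_with_scaper.sample_rate = 16000
#   mix_with_scaper.event_parameters = ...   # scaper event spec, elided
#   mix_with_scaper.labels = ['vocals', 'drums']
#   mix_with_scaper.coherent = True
#   train/mix_with_scaper.num_mixtures = 20000
#   train/mix_with_scaper.target_folder = '/data/mix/train'
#   val/mix_with_scaper.num_mixtures = 500
#   val/mix_with_scaper.target_folder = '/data/mix/val'
#
# After gin.parse_config_file(...) on such a config, calling
# make_scaper_datasets() runs one mixing pass per scope with those bindings.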